"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Parse and dispatch
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run the selected command
    args.func(args)


if __name__ == "__main__":
    main()
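# Editor-added usage note (a sketch; the subcommand names follow from the
# parsers registered above):
#
#   accelerate config   # interactive machine/launch configuration
#   accelerate env      # print environment information
#   accelerate launch train.py --script-args ...   # run a script with the saved config
#   accelerate test     # sanity-check the saved configuration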
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Dict = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 5_0257 , __lowerCAmelCase : int = 1024 , __lowerCAmelCase : int = 768 , __lowerCAmelCase : int = 12 , __lowerCAmelCase : int = 12 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str = "gelu_new" , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 1e-5 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , ):
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , __lowerCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=__lowerCAmelCase , n_positions=__lowerCAmelCase , n_embd=__lowerCAmelCase , n_layer=__lowerCAmelCase , n_head=__lowerCAmelCase , n_inner=__lowerCAmelCase , activation_function=__lowerCAmelCase , resid_pdrop=__lowerCAmelCase , embd_pdrop=__lowerCAmelCase , attn_pdrop=__lowerCAmelCase , layer_norm_epsilon=__lowerCAmelCase , initializer_range=__lowerCAmelCase , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , scale_attn_by_inverse_layer_idx=__lowerCAmelCase , reorder_and_upcast_attn=__lowerCAmelCase , )
_UpperCAmelCase = GPTaLMHeadModel(__lowerCAmelCase )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , ):
_UpperCAmelCase = self.transformer.transformer.wte(__lowerCAmelCase )
_UpperCAmelCase = self.encode_prefix(__lowerCAmelCase )
_UpperCAmelCase = self.decode_prefix(__lowerCAmelCase )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=__lowerCAmelCase , labels=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : torch.device ):
return torch.zeros(__lowerCAmelCase , self.prefix_length , dtype=torch.intaa , device=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
return self.encode_prefix(__lowerCAmelCase )
@torch.no_grad()
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = torch.split(__lowerCAmelCase , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(__lowerCAmelCase ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=__lowerCAmelCase , device=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(__lowerCAmelCase )
_UpperCAmelCase = torch.stack(__lowerCAmelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : int = 5 , __lowerCAmelCase : int = 67 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : Optional[int] = None , ):
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(__lowerCAmelCase , device=__lowerCAmelCase , dtype=torch.int )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase , device=__lowerCAmelCase , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(__lowerCAmelCase )
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = self.transformer(inputs_embeds=__lowerCAmelCase )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(__lowerCAmelCase , -1 )
_UpperCAmelCase = generated.expand(__lowerCAmelCase , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(__lowerCAmelCase , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(__lowerCAmelCase , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(__lowerCAmelCase ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=__lowerCAmelCase )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(__lowerCAmelCase , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
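# Editor-added construction sketch (the class and argument names above follow
# this reconstruction; the relative imports mean the module must live inside
# the diffusers package):
#
# decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
# out, hidden = decoder(input_ids, prefix_embeds, labels=input_ids)
# tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cuda")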
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = [1]
for i in range(2 ,lowercase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
_UpperCAmelCase = []
_UpperCAmelCase = list(range(lowercase ) )
# Find permutation
while factorials:
_UpperCAmelCase = factorials.pop()
_UpperCAmelCase , _UpperCAmelCase = divmod(lowercase ,lowercase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
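# Editor-added sanity check (a minimal sketch, not part of the original file):
# itertools.permutations(range(n)) also enumerates in lexicographic order, so
# its k-th entry must agree with kth_permutation(k, n).
if __name__ == "__main__":
    from itertools import permutations

    assert kth_permutation(10, 4) == list(list(permutations(range(4)))[10])  # [1, 3, 0, 2]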
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
create_all_state(1 ,lowercase ,lowercase ,[] ,lowercase )
return result
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(lowercase ,total_number - level + 2 ):
current_list.append(lowercase )
create_all_state(i + 1 ,lowercase ,level - 1 ,lowercase ,lowercase )
current_list.pop()
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
for i in total_list:
print(*lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = 4
UpperCAmelCase__ = 2
UpperCAmelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
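# Editor-added sanity check (a minimal sketch): itertools.combinations yields
# the same k-subsets of {1, ..., n} in the same order.
if __name__ == "__main__":
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]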
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
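# Editor-added example invocation (a sketch: the flag names mirror the fields
# used above, but their definitions live in the local `arguments.py`, and the
# repo names here are illustrative):
#
# python pretokenizing.py \
#     --tokenizer_dir codeparrot/codeparrot \
#     --dataset_name codeparrot/codeparrot-clean-train \
#     --tokenized_data_repo tokenized-codeparrot-train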
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
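# Editor-added sketch: from user code this configuration is reached through
# the public transformers API rather than the relative imports above, e.g.
#
# from transformers import LayoutLMv3Config, LayoutLMv3Model
# config = LayoutLMv3Config(hidden_size=384, num_hidden_layers=6)  # smaller variant
# model = LayoutLMv3Model(config)  # randomly initialized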
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class a ( lowerCAmelCase_ ):
_snake_case : Dict = 'time_series_transformer'
_snake_case : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str = "student_t" , __lowerCAmelCase : str = "nll" , __lowerCAmelCase : int = 1 , __lowerCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowerCAmelCase : Optional[Union[str, bool]] = "mean" , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : bool = True , __lowerCAmelCase : str = "gelu" , __lowerCAmelCase : int = 64 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : int = 100 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : int=True , **__lowerCAmelCase : List[str] , ):
# time series specific configuration
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length or prediction_length
_UpperCAmelCase = distribution_output
_UpperCAmelCase = loss
_UpperCAmelCase = input_size
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = scaling
_UpperCAmelCase = num_dynamic_real_features
_UpperCAmelCase = num_static_real_features
_UpperCAmelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase = cardinality
else:
_UpperCAmelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase = embedding_dimension
else:
_UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase = input_size * len(__lowerCAmelCase ) + self._number_of_features
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = use_cache
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
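# Editor-added sketch: a small configuration for a 24-step forecast horizon
# with a 48-step context window, built through the public transformers API.
#
# from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
# config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
# model = TimeSeriesTransformerModel(config)  # randomly initialized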
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
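# Editor-added example invocation (a sketch; the flags mirror the
# PlotArguments fields defined above, and the file names are illustrative):
#
# python plot_csv_file.py --csv_file plots/time.csv --figure_png_file=time.png --no_log_scale --is_time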
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCAmelCase__ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def lowerCAmelCase_ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : int ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_UpperCAmelCase = FlaxBertModel(__lowerCAmelCase )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
_UpperCAmelCase = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
_UpperCAmelCase = flatten_dict(unfreeze(model.params ) )
_UpperCAmelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_UpperCAmelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCAmelCase , repo_id="""test-model-flax""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_UpperCAmelCase = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
_UpperCAmelCase = flatten_dict(unfreeze(model.params ) )
_UpperCAmelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_UpperCAmelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1e-3 , msg=f'''{key} not identical''' )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_UpperCAmelCase = FlaxBertModel(__lowerCAmelCase )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
_UpperCAmelCase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_UpperCAmelCase = flatten_dict(unfreeze(model.params ) )
_UpperCAmelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_UpperCAmelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__lowerCAmelCase , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_UpperCAmelCase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
_UpperCAmelCase = flatten_dict(unfreeze(model.params ) )
_UpperCAmelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_UpperCAmelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__lowerCAmelCase , 1e-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = True
_UpperCAmelCase = flatten_dict(modela.params )
_UpperCAmelCase = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
_UpperCAmelCase = False
return models_are_equal
@require_flax
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_UpperCAmelCase = FlaxBertModel(__lowerCAmelCase )
_UpperCAmelCase = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertTrue(check_models_equal(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_UpperCAmelCase = FlaxBertModel(__lowerCAmelCase )
_UpperCAmelCase = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , max_shard_size="""10KB""" )
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertTrue(check_models_equal(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = """bert"""
_UpperCAmelCase = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = """bert"""
_UpperCAmelCase = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = FlaxBertModel.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transfromers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
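# Editor-added usage sketch (the `train` body is hypothetical): the decorator
# injects `batch_size` as the first argument and halves it after every
# OOM-style failure recognized by `should_reduce_batch_size`.
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size, model, optimizer):
#     ...  # build dataloaders and run the loop with `batch_size`
#
# train(model, optimizer)  # do NOT pass batch_size yourself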
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
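# Editor-added worked example: "to" appears twice in the line, one of three
# documents contains "first", idf = round(log10(3 / 1), 3) = 0.477, and
# tf-idf = round(2 * 0.477, 3) = 0.954.
if __name__ == "__main__":
    print(term_frequency("to", "To be, or not to be"))  # 2
    print(document_frequency("first", "the first line\nthe second line\nthe third line"))  # (1, 3)
    print(inverse_document_frequency(1, 3))  # 0.477
    print(tf_idf(2, 0.477))  # 0.954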
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
_UpperCAmelCase = float(lowercase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
_UpperCAmelCase = decimal - int(lowercase )
if fractional_part == 0:
return int(lowercase ), 1
else:
_UpperCAmelCase = len(str(lowercase ).split(""".""" )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(lowercase ), int(lowercase )
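# Editor-added sanity check, placed before the demo below because its final
# line ("78td") intentionally raises ValueError: the result should match the
# stdlib's fully reduced fraction.
from fractions import Fraction

assert decimal_to_fraction("6.25") == (Fraction("6.25").numerator, Fraction("6.25").denominator)  # (25, 4)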
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
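# Editor-added example invocation (a sketch; the checkpoint name is
# illustrative, following the `patrickvonplaten/..._old` convention noted in
# the comment at the top of the file):
#
# python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#     --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#     --pytorch_dump_folder_path ./prophetnet-large-uncased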
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase__ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase__ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase__ = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase__ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase__ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
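# Editor-added worked example: with n = 5 samples of which c = 2 pass,
# pass@1 = 1 - C(3, 1) / C(5, 1) = 1 - 3/5 = 0.4, and indeed
# estimate_pass_at_k(np.array([5]), np.array([2]), 1) returns array([0.4]).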
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner case
if len(inverse_c[0] ) <= 1:
return inverse_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverse_c[i][j]
+ inverse_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverse_c[i][j]
- inverse_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverse_c]
# Remove leading 0's
while inverse_c[-1] == 0:
inverse_c.pop()
return inverse_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
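# A hedged usage sketch of the polynomial multiplier above. The class name
# `FFT` is an assumption for illustration (the listing names it `a`); the
# coefficient lists are low-order so the product is easy to verify by hand:
# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2.
#
# fft = FFT(poly_a=[1, 2], poly_b=[3, 4])
# print(fft.product)  # expected coefficients 3, 10, 8, up to rounding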
| 30
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase__ = """docs/source/en/_toctree.yml"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = defaultdict(lowercase )
for doc in model_doc:
counts[doc["local"]] += 1
_UpperCAmelCase = [key for key, value in counts.items() if value > 1]
_UpperCAmelCase = []
for duplicate_key in duplicates:
_UpperCAmelCase = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(lowercase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(lowercase ,key=lambda lowercase : s["title"].lower() )
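# A small illustration, on invented entries, of what the deduplication above
# does: rows sharing a `local` key and a single title collapse to one row,
# and the cleaned list comes back sorted case-insensitively by title.
#
# clean_model_doc_toc([
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
# ])
# -> [{"local": "model_doc/albert", "title": "ALBERT"},
#     {"local": "model_doc/bert", "title": "BERT"}]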
def __UpperCAmelCase ( lowercase=False ):
"""simple docstring"""
with open(lowercase ,encoding="""utf-8""" ) as f:
_UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_UpperCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_UpperCAmelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_UpperCAmelCase = api_doc[model_idx]["""sections"""]
_UpperCAmelCase = [(idx, section) for idx, section in enumerate(lowercase ) if """sections""" in section]
_UpperCAmelCase = False
for idx, modality_doc in modalities_docs:
_UpperCAmelCase = modality_doc["""sections"""]
_UpperCAmelCase = clean_model_doc_toc(lowercase )
if old_modality_doc != new_modality_doc:
_UpperCAmelCase = True
if overwrite:
_UpperCAmelCase = new_modality_doc
if diff:
if overwrite:
_UpperCAmelCase = model_doc
_UpperCAmelCase = api_doc
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase ,allow_unicode=lowercase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
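# A minimal, hedged usage sketch of the configuration above (the public class
# name `UperNetConfig` is assumed from the registered model_type "upernet"):
#
# config = UperNetConfig()                      # falls back to a ResNet backbone
# assert config.backbone_config.model_type == "resnet"
# serialized = config.to_dict()                 # backbone config is inlined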
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# Checks if the entire collection has been sorted
if len(lowercase ) <= 1 or n <= 1:
return
insert_next(lowercase ,n - 1 )
rec_insertion_sort(lowercase ,n - 1 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# Checks order between adjacent elements
if index >= len(lowercase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_UpperCAmelCase , _UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowercase ,index + 1 )
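# A short worked trace of the recursion above on the hypothetical input
# [3, 1, 2]: insert_next(collection, 2) finds 1 <= 2 and returns, then the
# recursive call on n - 1 swaps 3 and 1 and bubbles 3 past 2, giving
# [1, 2, 3].
#
# data = [3, 1, 2]
# rec_insertion_sort(data, len(data))
# assert data == [1, 2, 3]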
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter integers separated by spaces: """)
UpperCAmelCase__ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 30
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
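# A quick sanity check on the distribution helper above: nine four-sided dice
# produce 4**9 = 262144 ordered outcomes, so the frequencies returned for
# totals 9 through 36 must sum to exactly that number.
#
# assert sum(total_frequency_distribution(sides_number=4, dice_number=9)) == 4**9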
if __name__ == "__main__":
print(F'''{solution() = }''')
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_UpperCAmelCase = []
for char_count in range(lowercase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(lowercase )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''both `encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given.''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
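# A hedged sketch of how the split ONNX configs above are typically obtained;
# the method names `get_encoder_config` / `get_decoder_config` and the example
# checkpoint are assumptions based on the upstream transformers API, not taken
# from this listing.
#
# config = AutoConfig.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# onnx_config = VisionEncoderDecoderOnnxConfig(config)
# encoder_onnx = onnx_config.get_encoder_config(config.encoder)
# decoder_onnx = onnx_config.get_decoder_config(config.encoder, config.decoder, feature="default")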
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , ):
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = 32
_UpperCAmelCase = 2
_UpperCAmelCase = 4
_UpperCAmelCase = 37
_UpperCAmelCase = """gelu"""
_UpperCAmelCase = 0.1
_UpperCAmelCase = 0.1
_UpperCAmelCase = 512
_UpperCAmelCase = 16
_UpperCAmelCase = 2
_UpperCAmelCase = 0.02
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = TFEsmModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
_UpperCAmelCase = True
_UpperCAmelCase = TFEsmModel(config=__lowerCAmelCase )
_UpperCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
# Also check the case where encoder outputs are not passed
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = TFEsmForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFEsmForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : List[str] = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : str = False
_snake_case : Optional[int] = False
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = TFEsmModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFEsmModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_UpperCAmelCase = model.get_bias()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for k, v in name.items():
assert isinstance(__lowerCAmelCase , tf.Variable )
else:
_UpperCAmelCase = model.get_output_embeddings()
assert x is None
_UpperCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
# compare the actual values for a slice.
_UpperCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
_UpperCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
# compare the actual values for a slice.
_UpperCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 30
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
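# A hedged invocation sketch for the conversion above; the script filename and
# output path are invented placeholders:
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variation
#
# The script reuses every component of the Karlo text-to-image pipeline and
# only swaps in a CLIP image encoder and feature extractor for conditioning.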
| 30
| 1
|
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
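# A short worked example, on a made-up corpus, of the helpers above (the
# names term_frequency, inverse_document_frequency and tf_idf are assumed
# from the original module): term_frequency counts case-insensitive exact
# matches, so "to" appears twice in "To be, or not to be"; with df = 1
# matching document out of n = 10, idf = round(log10(10 / 1), 3) = 1.0, and
# tf_idf(2, 1.0) = 2.0.
#
# tf = term_frequency("to", "To be, or not to be")   # -> 2
# idf = inverse_document_frequency(1, 10)            # -> 1.0
# score = tf_idf(tf, idf)                            # -> 2.0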
| 30
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
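# A hedged usage sketch of the decorator above (exposed by Accelerate as
# `find_executable_batch_size`; the training loop itself is invented). The
# decorated function must accept the batch size as its first parameter and
# the caller must not pass it: on an out-of-memory failure the wrapper
# halves the value and calls the function again.
#
# @find_executable_batch_size(starting_batch_size=128)
# def training_loop(batch_size, model, dataloader):
#     ...  # re-entered with 64, 32, ... after each CUDA/XPU/NPU OOM
#
# training_loop(model, dataloader)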
| 30
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
UpperCAmelCase__ = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
UpperCAmelCase__ = {
"""ctrl""": 2_5_6,
}
UpperCAmelCase__ = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(lowercase )
return pairs
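# A tiny, verifiable illustration of get_pairs above: for the symbol tuple
# ("l", "o", "w") the adjacent-pair set is {("l", "o"), ("o", "w")}, i.e. the
# candidate merges that the BPE loop below ranks and applies.
#
# assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}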
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = CONTROL_CODES
def __init__( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple="<unk>" , **__lowerCAmelCase : List[str] ):
super().__init__(unk_token=__lowerCAmelCase , **__lowerCAmelCase )
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase = json.load(__lowerCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
_UpperCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_UpperCAmelCase = {}
@property
def lowerCAmelCase_ ( self : Any ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__lowerCAmelCase )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_UpperCAmelCase = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__lowerCAmelCase ):
try:
_UpperCAmelCase = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__lowerCAmelCase )
_UpperCAmelCase = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__lowerCAmelCase )
_UpperCAmelCase = """@@ """.join(__lowerCAmelCase )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
return word
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(R"""\S+\n?""" , __lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Any ):
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int ):
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = """ """.join(__lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + """\n""" )
_UpperCAmelCase = 0
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(""" """.join(__lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 1
|
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
UpperCAmelCase__ = """sshleifer/bart-tiny-random"""
UpperCAmelCase__ = """patrickvonplaten/t5-tiny-random"""
@require_torch
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Any ):
return AutoConfig.from_pretrained(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase_ ( self : List[str] ):
with self.assertRaises(__lowerCAmelCase ):
create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=__lowerCAmelCase , d=__lowerCAmelCase )
| 30
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
| 1
|
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self : Dict , *,
__lowerCAmelCase : int = 4 , __lowerCAmelCase : int = 768 , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , ):
super().__init__()
_UpperCAmelCase = nn.Parameter(torch.zeros(__lowerCAmelCase ) )
# parameters for additional clip time embeddings
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# parameters for encoder hidden states
_UpperCAmelCase = clip_extra_context_tokens
_UpperCAmelCase = nn.Linear(
__lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.LayerNorm(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , *, __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_UpperCAmelCase = image_embeddings.shape[0]
_UpperCAmelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_UpperCAmelCase = classifier_free_guidance_embeddings.expand(
__lowerCAmelCase , -1 )
_UpperCAmelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_UpperCAmelCase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_UpperCAmelCase = self.embedding_proj(__lowerCAmelCase )
_UpperCAmelCase = self.clip_image_embeddings_project_to_time_embeddings(__lowerCAmelCase )
_UpperCAmelCase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_UpperCAmelCase = self.clip_extra_context_tokens_proj(__lowerCAmelCase )
_UpperCAmelCase = clip_extra_context_tokens.reshape(__lowerCAmelCase , -1 , self.clip_extra_context_tokens )
_UpperCAmelCase = clip_extra_context_tokens.permute(0 , 2 , 1 )
_UpperCAmelCase = self.encoder_hidden_states_proj(__lowerCAmelCase )
_UpperCAmelCase = self.text_encoder_hidden_states_norm(__lowerCAmelCase )
_UpperCAmelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
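# A shape walk-through of the forward pass above, under assumed sizes (batch
# size 2, clip_extra_context_tokens 4, cross_attention_dim 768, text sequence
# length 77): the image embedding is projected into the timestep-embedding
# space and added to the projected prompt embedding, while a (2, 4, 768)
# block of extra context tokens is concatenated in front of the projected
# (2, 77, 768) text encoder states, giving hidden states of shape (2, 81, 768).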
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
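# A quick hand check of the digit-sum routine above on a small, hypothetical
# power: solution(15) computes 2**15 = 32768, whose digits sum to
# 3 + 2 + 7 + 6 + 8 = 26.
#
# assert solution(15) == 26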
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase__ = False
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return 12
@property
def lowerCAmelCase_ ( self : str ):
return 12
@property
def lowerCAmelCase_ ( self : Dict ):
return 32
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_transformer(self ):
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion(self ):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self ):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
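# Editor's note: the expected_slice arrays above are the bottom-right 3x3
# corner of the last channel from a seeded run. A hypothetical helper (not
# part of the test suite) to regenerate them after an intended numerical
# change:
def _print_expected_slice(image ):
    import numpy as np
    print(np.round(image[0, -3:, -3:, -1].flatten() , 4 ).tolist() )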
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 30
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer ):
    def __init__(self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self , num_training_steps: int ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler(self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler(self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss(self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step(self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
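# Editor's sketch of what _pad_tensors_to_max_len does, on toy data: right-pad
# a (batch, seq) tensor with pad_token_id up to max_length.
def _pad_demo():
    import torch
    tensor = torch.tensor([[5, 6, 7]] )
    pad_token_id, max_length = 0, 5
    padded = pad_token_id * torch.ones((tensor.shape[0], max_length) , dtype=tensor.dtype )
    padded[:, : tensor.shape[-1]] = tensor
    assert padded.tolist() == [[5, 6, 7, 0, 0]]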
| 30
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class a ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["""politics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self , classifier , _ ):
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("""""" , candidate_labels="""politics""" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="""politics""" )
        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id(self , zero_shot_classifier: Pipeline ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
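    # Editor's sketch: the entailment_id exercised above is resolved by
    # scanning config.label2id for a label whose name starts with "entail"
    # (case-insensitively), falling back to -1. A simplified stand-in for the
    # pipeline property, matching the assertions in run_entailment_id:
    def _resolve_entailment_id(self , label2id ):
        for label, idx in label2id.items():
            if label.lower().startswith("""entail""" ):
                return idx
        return -1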
@require_torch
    def test_truncation(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.333, 0.333, 0.333],
            } , )
@require_tf
    def test_small_model_tf(self ):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.333, 0.333, 0.333],
            } , )
@slow
@require_torch
    def test_large_model_pt(self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self ):
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
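# Editor's sketch: _LazyModule defers the heavy torch imports above until an
# attribute is first accessed. A toy equivalent of the lookup it performs
# (illustrative only, not the actual implementation):
def _lazy_import_demo(name ):
    import importlib
    for submodule, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f'''.{submodule}''' , __name__ ) , name )
    raise AttributeError(name )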
| 30
| 0
|
"""simple docstring"""
edges = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
vertices = ["""a""", """b""", """c""", """d""", """e"""]
def topological_sort(start , visited , sort ):
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("""a""", [], [])
print(sort)
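# Editor's sketch: a quick property check for the DFS-based sort above. With
# this post-order variant, every vertex is appended after all of its children,
# so each child must appear earlier in the result than its parent.
def _check_topological(result ):
    for parent, children in edges.items():
        for child in children:
            assert result.index(child ) < result.index(parent )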
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
"""simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
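# Editor's sketch: timm stores Q, K and V as one fused (3*hidden, hidden)
# projection; read_in_q_k_v above slices it into three (hidden, hidden)
# blocks. Toy shapes:
def _qkv_split_demo():
    import torch
    hidden = 4
    qkv = torch.randn(3 * hidden , hidden )
    q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
    assert q.shape == k.shape == v.shape == (hidden, hidden)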
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("""Predicted class:""" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(f'''ybelkada/{vit_name}''' )
        processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
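# Editor's note: example invocation (the script filename is illustrative, not
# taken from the source):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid --push_to_hub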
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(size ):
    """simple docstring"""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed(canvas ):
    """simple docstring"""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run(canvas ):
    """simple docstring"""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt , neighbours ):
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["""w""", """k"""])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
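# Editor's spot-checks for the Conway rules implemented in __judge_point
# (reproduction and underpopulation); illustrative only, never called by the
# script itself:
def _judge_point_demo():
    reproduction = [[True, True, True], [False, False, False], [False, False, False]]
    assert __judge_point(False , reproduction ) is True
    underpopulated = [[True, False, False], [False, True, False], [False, False, False]]
    assert __judge_point(True , underpopulated ) is False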
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
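# Editor's note: this entry point backs the `accelerate` console script, so
# the subcommands registered above are invoked as, e.g.:
#   accelerate config   # interactive setup
#   accelerate env      # print environment info
#   accelerate launch train.py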
| 30
| 0
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo , pytorch_dump_folder_path ):
    """simple docstring"""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="""pytorch_model.bin""" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("""roberta.""" ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
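# Editor's sketch of the key remapping above, on a toy state_dict:
def _rename_demo():
    original = {"""roberta.embeddings.weight""": 1, """lm_head.bias""": 2}
    renamed = {}
    for key, value in original.items():
        if key.startswith("""roberta.""" ):
            key = """roberta_prelayernorm.""" + key[len("""roberta.""" ) :]
        renamed[key] = value
    assert """roberta_prelayernorm.embeddings.weight""" in renamed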
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 354
|
"""simple docstring"""
def reverse_words(input_str: str ) -> str:
    """
    Reverses the order of words in a string.
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
from manim import *
class a ( Scene ):
    def construct(self ):
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase = Rectangle(height=0.25 , width=0.25 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = VGroup(_a , _a ).arrange(_a , buff=0 )
_UpperCAmelCase = Text("""CPU""" , font_size=24 )
_UpperCAmelCase = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = Text("""GPU""" , font_size=24 )
_UpperCAmelCase = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = Text("""Model""" , font_size=24 )
_UpperCAmelCase = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(_a ):
_UpperCAmelCase = fill.copy().set_fill(_a , opacity=0.8 )
target.move_to(_a )
model_arr.append(_a )
_UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_a )
self.add(*_a , *_a )
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = VGroup(*_a ).arrange(_a , buff=0 )
_UpperCAmelCase = VGroup(_a , _a ).arrange(_a , buff=0 )
_UpperCAmelCase = Text("""Disk""" , font_size=24 )
_UpperCAmelCase = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4, -1.25, 0] )
self.add(_a , _a )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_UpperCAmelCase = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_UpperCAmelCase = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_a ) )
_UpperCAmelCase = Square(0.3 )
input.set_fill(_a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _a , buff=0.5 )
self.play(Write(_a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_a , buff=0.02 )
self.play(MoveToTarget(_a ) )
self.play(FadeOut(_a ) )
_UpperCAmelCase = Arrow(start=_a , end=_a , color=_a , buff=0.5 )
a.next_to(model_arr[0].get_left() , _a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_UpperCAmelCase = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
_UpperCAmelCase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_a ) , Circumscribe(model_arr[0] , color=_a , **_a ) , Circumscribe(model_cpu_arr[0] , color=_a , **_a ) , Circumscribe(gpu_rect[0] , color=_a , **_a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_UpperCAmelCase = AnimationGroup(
FadeOut(_a , run_time=0.5 ) , MoveToTarget(_a , run_time=0.5 ) , FadeIn(_a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **_a ) , Circumscribe(cpu_left_col_base[i] , **_a ) , Circumscribe(cpu_left_col_base[i + 1] , color=_a , **_a ) , Circumscribe(gpu_rect[0] , color=_a , **_a ) , Circumscribe(model_arr[i + 1] , color=_a , **_a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_a , **_a ) , Circumscribe(cpu_left_col_base[-1] , color=_a , **_a ) , Circumscribe(gpu_rect[0] , color=_a , **_a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_UpperCAmelCase = a_c
_UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_a ) , FadeOut(_a , run_time=0.5 ) , )
_UpperCAmelCase = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) , MoveToTarget(_a ) )
self.wait()
| 355
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name ):
    """simple docstring"""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
        writer = csv.writer(f )
        writer.writerow(["""id""", """created_at""", """text"""] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
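# Editor's note: `user_timeline` returns at most 200 tweets per request, so
# the loop above pages backwards through the timeline via
# max_id = <oldest fetched id> - 1 until an empty page comes back; the
# standard Twitter API also caps retrievable history at roughly the most
# recent 3200 tweets per account.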
| 30
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vit-base-patch16-224""": """https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json""",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig ):
    model_type = "vit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="""gelu""" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class a ( OnnxConfig ):
_snake_case : Tuple = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : str ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Any ):
return 1e-4
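# Hedged usage sketch (illustrative, not part of the original file): with this
# OnnxConfig registered, a ViT checkpoint can typically be exported through the
# transformers ONNX CLI, e.g.
#   python -m transformers.onnx --model=google/vit-base-patch16-224 onnx_out/
# (exact flags depend on the installed transformers version).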
| 356
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    """simple docstring"""
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] ,truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCAmelCase__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def lowerCAmelCase_ ( self : Union[str, Any] , predictions : Optional[int] , references : List[Any] , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 357
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( PretrainedConfig ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
        _UpperCAmelCase = max_2d_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
        _UpperCAmelCase = rel_2d_pos_bins
        _UpperCAmelCase = max_rel_2d_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( OnnxConfig ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
| 30
| 0
|
"""simple docstring"""
from math import factorial
def solution( num = 1_00 ):
    """simple docstring"""
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
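# For reference, Project Euler problem 20 asks for the digit sum of 100!,
# which is 648, so solution(100) is expected to return 648.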
| 358
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None ,metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default ,metadata=metadata )
@dataclass
class a :
    csv_file : str = field(
        metadata={'help': 'The csv file to plot.'} , )
    plot_along_batch : bool = field(
        default=False , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    is_time : bool = field(
        default=False , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    no_log_scale : bool = field(
        default=False , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    is_train : bool = field(
        default=False , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    figure_png_file : Optional[str] = field(
        default=None , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    short_model_names : Optional[List[str]] = list_field(
        default=None , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def can_convert_to_float( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class Plot :
    def __init__( self : int , args : Union[str, Any] ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="""""" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
                if can_convert_to_int(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = int(row["""result"""] )
                elif can_convert_to_float(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = float(row["""result"""] )
    def plot( self : Optional[Any] ):
        fig, ax = plt.subplots()
        title_str = """Time usage""" if self.args.is_time else """Memory usage"""
        title_str = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""" )
            ax.set_yscale("""log""" )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
            results = self.result_dict[model_name]["""result"""]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , """--""" )
                title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = """Time in s""" if self.args.is_time else """Memory in MB"""
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main( ):
    """simple docstring"""
    parser = HfArgumentParser(a )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence( array ):  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
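# Illustrative example (not from the original file): for
# [10, 22, 9, 33, 21, 50, 41, 60, 80] the function returns one longest
# non-decreasing subsequence of length 6, e.g. [10, 22, 33, 41, 60, 80]
# (tie-breaking between equally long subsequences depends on the recursion order).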
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment :
    framework : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
        return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( request ):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""PerceiverFeatureExtractor"""]
UpperCAmelCase__ = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360
|
"""simple docstring"""
import string
from math import log10
def term_frequency( term ,document ):
    """simple docstring"""
    document_without_punctuation = document.translate(
        str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
    tokenize_document = document_without_punctuation.split(""" """ )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term ,corpus ):
    """simple docstring"""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" ,"""""" ,string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df ,n ,smoothing=False ):
    """simple docstring"""
    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) ,3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) ,3 )
def tf_idf( tf ,idf ):
    """simple docstring"""
    return round(tf * idf ,3 )
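# Hedged usage sketch (illustrative, not part of the original file):
# >>> term_frequency("to", "To be, or not to be")
# 2
# >>> document_frequency("first", "This is the first document.\nThis is the second one.\nAnd this is the third.")
# (1, 3)
# >>> inverse_document_frequency(1, 3)
# 0.477
# >>> tf_idf(2, 0.477)
# 0.954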
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path ,pytorch_dump_folder_path ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30
| 0
|
"""simple docstring"""
def bead_sort( sequence ):
    """simple docstring"""
    if any(not isinstance(x ,int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence ,sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
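# Note: each outer pass lets every "bead" fall by at most one rod, so the loop
# runs len(sequence) times over the adjacent pairs, giving O(n^2) behaviour;
# the sort only applies to non-negative integers, as checked above.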
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 362
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
    def __init__( self : Tuple , poly_a : Dict=None , poly_b : Union[str, Any]=None ):
        # Input as list
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft( self : Optional[int] , which : str ):
        dft = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply( self : Tuple ):
        dft_a = self.__dft("""A""" )
        dft_b = self.__dft("""B""" )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self : Dict ):
        a = """A = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = """B = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = """A*B = """ + """ + """.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return f'''{a}\n{b}\n{c}'''
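# Worked example (illustrative, not from the original file): with
# poly_a = [2, 3, 4] and poly_b = [1, 2, 3], i.e. (2 + 3x + 4x^2)(1 + 2x + 3x^2),
# the `product` attribute should hold the coefficients [2, 7, 16, 17, 12]
# (as complex values with near-zero imaginary parts after rounding).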
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a ,model_b ,did_step ,iteration ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model ,input ,target ,accelerator ,do_backward=True ):
    """simple docstring"""
    model.train()
    output = model(input )
    loss = F.mse_loss(output ,target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
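# Note: when `do_backward` is False the loss is scaled down by the number of
# gradient-accumulation steps and back-propagated manually; otherwise
# `accelerator.backward` performs the (already scaled) backward pass itself.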
def get_training_setup( accelerator ,sched=False ):
    """simple docstring"""
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset ,batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() ,lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() ,lr=1E-3 )
        sched = LambdaLR(opt ,lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt ,lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model ,ddp_opt ,ddp_sched ,dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model ,dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad ,ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase ):
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
# Sync grads
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False ,dispatch_batches=False ):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches ,dispatch_batches=dispatch_batches ,gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False ,dispatch_batches=False ):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches ,dispatch_batches=dispatch_batches ,gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator ,True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase ):
step_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
_UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def test_dataloader_break( ):
    """simple docstring"""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset ,batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset ,batch_size=16 )
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader ,second_dataloader )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if iteration < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase )
if batch_num < len(_lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main( ):
    """simple docstring"""
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(_lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(_lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ ,f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' ,)
test_gradient_accumulation(_lowerCamelCase ,_lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" ,"""2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,"""`split_batches=False`, `dispatch_batches=False`**""" ,)
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' ,)
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase ,_lowerCamelCase )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 363
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( PretrainedConfig ):
_snake_case : List[str] = 'upernet'
    def __init__( self : Tuple , backbone_config : int=None , hidden_size : Tuple=512 , initializer_range : Union[str, Any]=0.02 , pool_scales : Tuple=[1, 2, 3, 6] , use_auxiliary_head : Any=True , auxiliary_loss_weight : Any=0.4 , auxiliary_in_channels : Union[str, Any]=384 , auxiliary_channels : Optional[int]=256 , auxiliary_num_convs : List[str]=1 , auxiliary_concat_input : Optional[int]=False , loss_ignore_index : Optional[int]=255 , **kwargs : Union[str, Any] , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def lowerCAmelCase_ ( self : List[Any] ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence ):
    """simple docstring"""
    create_state_space_tree(sequence ,[] ,0 )
def create_state_space_tree( sequence ,current_subsequence ,index ):
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence ,current_subsequence ,index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence ,current_subsequence ,index + 1 )
    current_subsequence.pop()
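# Note: the state-space tree branches on "skip element" vs. "take element" at
# every index, so exactly 2 ** len(sequence) subsequences (including the empty
# one) are printed.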
if __name__ == "__main__":
UpperCAmelCase__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 364
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number ,dice_number ):
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number ,max_face_number + 1 )
    for dice_numbers in product(face_numbers ,repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ):
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 ,dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 ,dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total ,max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
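# For reference, the accepted Project Euler 205 answer is 0.5731441, which is
# the value solution() should return.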
| 30
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase__ = get_logger()
UpperCAmelCase__ = None
class a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : str , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : str ):
super().__init__(features=_a )
import jax
from jaxlib.xla_client import Device
if isinstance(_a , _a ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(_a )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(_a , _a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def lowerCAmelCase_ ( ):
import jax
return {str(_a ): device for device in jax.devices()}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(_a , _a ) and column:
if all(
isinstance(_a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_a , axis=0 )
return column
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[str] ):
import jax
import jax.numpy as jnp
if isinstance(_a , (str, bytes, type(_a )) ):
return value
elif isinstance(_a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _UpperCAmelCase = {'''dtype''': jnp.int64}
            else:
                _UpperCAmelCase = {'''dtype''': jnp.int32}
elif isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            _UpperCAmelCase = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_a , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(_a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_a , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
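# A hedged usage sketch (public `datasets` API; the toy data is invented): a formatter
# like the one above is what backs `Dataset.with_format("jax")`, after which indexing
# returns `jax.Array` values instead of plain Python lists.
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   print(type(ds[0]["x"]))  # a jax.Array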
| 365
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
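# A hedged usage sketch (public transformers API; the encoder/decoder choices are illustrative):
#   from transformers import VisionEncoderDecoderConfig, ViTConfig, GPT2Config
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#   print(config.model_type, config.decoder.is_decoder)  # vision-encoder-decoder True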
| 30
| 0
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations):
    """simple docstring"""

    def is_in_circle(x, y) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations, min_value=0.0, max_value=1.0):
    """simple docstring"""

    def identity_function(x) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations):
    """simple docstring"""

    def function_to_integrate(x) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
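# A hedged usage sketch: integrating f(x) = x**2 over [0, 1] with the estimator above.
# The exact value is 1/3, so the estimate should approach 0.333... for large iteration counts.
if __name__ == "__main__":
    estimate = area_under_curve_estimator(100_000, lambda x: x**2, 0.0, 1.0)
    print(f"Monte Carlo estimate of the integral of x**2 over [0, 1]: {estimate:.4f} (exact: 1/3)")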
| 366
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
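# A hedged invocation sketch (the script filename and output path are placeholders):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation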
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def mean(nums):
    """simple docstring"""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
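# A hedged usage sketch for the function above (values invented):
if __name__ == "__main__":
    print(mean([3, 6, 9]))  # 6.0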
| 367
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """simple docstring"""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    """simple docstring"""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
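# A hedged usage sketch of the decorator above (the training body is invented):
# `batch_size` is injected as the first argument and halved after every OOM-style failure.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    print(f"trying batch size {batch_size}")
    # ... build dataloaders and run the training loop with `batch_size` here ...

if __name__ == "__main__":
    train()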
| 30
| 0
|
"""simple docstring"""
import os
def __UpperCAmelCase ( lowercase = "matrix.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ) ,_UpperCamelCase ) ) as in_file:
_UpperCAmelCase = in_file.read()
_UpperCAmelCase = [[int(_UpperCamelCase ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
_UpperCAmelCase = [[0 for cell in row] for row in grid]
_UpperCAmelCase = len(grid[0] )
_UpperCAmelCase = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
_UpperCAmelCase = grid[0][0]
for i in range(1 ,_UpperCamelCase ):
_UpperCAmelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 ,_UpperCamelCase ):
_UpperCAmelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 ,_UpperCamelCase ):
for j in range(1 ,_UpperCamelCase ):
_UpperCAmelCase = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 30
| 0
|
"""simple docstring"""
def binomial_coefficient(n, r):
    """simple docstring"""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
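# A hedged sanity check using Pascal's rule C(n, r) = C(n-1, r-1) + C(n-1, r):
assert binomial_coefficient(10, 5) == binomial_coefficient(9, 4) + binomial_coefficient(9, 5) == 252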
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
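# A hedged usage sketch (via the public transformers API rather than this internal module):
#   from transformers import VanConfig
#   cfg = VanConfig()
#   print(cfg.hidden_sizes)  # [64, 128, 320, 512] with the defaults above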
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def minimum_cost_path(matrix):
    """simple docstring"""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
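# A hedged usage sketch (grid values invented): the cheapest right/down path through
# [[1, 3], [2, 4]] is 1 -> 2 -> 4 with cost 7. Note the function mutates its input in place.
if __name__ == "__main__":
    print(minimum_cost_path([[1, 3], [2, 4]]))  # 7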
| 370
|
"""simple docstring"""
def solution(power=1000):
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
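# A hedged sanity check (the classic worked example for this problem):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26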
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage, current, resistance):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
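# A hedged usage sketch: pass exactly one quantity as 0 and the function solves for it.
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}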
| 371
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
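# A hedged usage note (the argument wiring is illustrative, not from the source): this class
# is meant as a drop-in replacement for `Trainer` in the legacy seq2seq scripts, e.g.
#   trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args,
#                            train_dataset=train_dataset, eval_dataset=eval_dataset)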
| 30
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
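# A hedged usage sketch (via the public transformers API):
#   from transformers import SEWDConfig
#   cfg = SEWDConfig()
#   print(cfg.inputs_to_logits_ratio)  # 320: the product of the default conv strides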
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
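# A hedged usage note: with `_LazyModule` registered above, importing this package is cheap;
# the heavy torch-backed symbols are only imported on first attribute access, e.g.
#   from transformers import GitForCausalLM  # triggers the real submodule import lazily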
| 30
| 0
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """simple docstring"""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """simple docstring"""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """simple docstring"""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """simple docstring"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
_UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda lowercase : x.dtype == jnp.bfloataa ,__lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
_UpperCAmelCase = jax.tree_util.tree_map(
lambda lowercase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,__lowerCamelCase )
_UpperCAmelCase = flatten_dict(__lowerCamelCase )
_UpperCAmelCase = pt_model.state_dict()
_UpperCAmelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_UpperCAmelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_UpperCAmelCase = []
_UpperCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_UpperCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix
_UpperCAmelCase = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCAmelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
_UpperCAmelCase = flax_key_tuple[:-1] + ("weight",)
_UpperCAmelCase = jnp.transpose(__lowerCamelCase ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
_UpperCAmelCase = flax_key_tuple[:-1] + ("weight",)
_UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_UpperCAmelCase = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
_UpperCAmelCase = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
_UpperCAmelCase = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_UpperCAmelCase = ".".join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_UpperCAmelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_UpperCAmelCase = key.split(""".""" )
_UpperCAmelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_UpperCAmelCase = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
_UpperCAmelCase = key_components[-2] + "_v"
if name is not None:
_UpperCAmelCase = key_components[:-3] + [name]
_UpperCAmelCase = ".".join(__lowerCamelCase )
_UpperCAmelCase = key
if flax_key in special_pt_names:
_UpperCAmelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
_UpperCAmelCase = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase ,np.ndarray ) else flax_tensor
_UpperCAmelCase = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
_UpperCAmelCase = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__lowerCamelCase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
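# Hedged illustration (not part of the loader above; shapes are assumed): Flax
# stores conv kernels as (H, W, in_ch, out_ch) while PyTorch expects
# (out_ch, in_ch, H, W), which is exactly the (3, 2, 0, 1) transpose applied to
# 4-D "kernel" weights above. Uses the numpy import already present here.
def _demo_conv_kernel_transpose():
    flax_kernel = np.zeros((3, 3, 16, 32))  # (H, W, in_ch, out_ch) in Flax
    pt_weight = np.transpose(flax_kernel, (3, 2, 0, 1))
    assert pt_weight.shape == (32, 16, 3, 3)  # (out_ch, in_ch, H, W) in PyTorch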
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
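# Hedged illustration (hypothetical helper, sizes assumed): timm fuses the
# query/key/value projections into one (3 * hidden, hidden) matrix, and the
# slices above recover q, k and v in that order.
def _demo_qkv_split(hidden=4):
    qkv = torch.zeros(3 * hidden, hidden)  # fused projection weight
    q = qkv[:hidden, :]
    k = qkv[hidden : 2 * hidden, :]
    v = qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)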
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
UpperCAmelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
UpperCAmelCase__ = os.environ.get("""USER_TOKEN""", """""")
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {
'''Authorization''': f'''token {auth_token}''',
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_snake_case ,headers=_snake_case ).json()
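# Hedged usage note: the same token header authenticates any GitHub REST
# endpoint, e.g. listing the authenticated user's repositories:
#
#   requests.get(BASE_URL + "/user/repos", headers={
#       "Authorization": f"token {USER_TOKEN}",
#       "Accept": "application/vnd.github.v3+json",
#   }).json()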
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_snake_case : Optional[Any] = IFPipeline
_snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase_ ( self : int ):
return self._get_dummy_components()
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=0 ):
if str(snake_case__ ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(snake_case__ )
else:
_UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : Optional[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase_ ( self : Optional[Any] ):
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase_ ( self : int ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : str ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase_ ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_UpperCAmelCase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_UpperCAmelCase = None
_UpperCAmelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components )
_UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components )
_UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any ):
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ):
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case__ )
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case__ )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __UpperCAmelCase ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
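# Hedged sketch of the dispatch pattern above: each *_command_parser attaches
# its handler via set_defaults(func=...), so parse_args() yields a namespace
# whose .func routes to the chosen subcommand. Minimal standalone version:
#
#   parser = ArgumentParser("demo")
#   sub = parser.add_subparsers()
#   sub.add_parser("hello").set_defaults(func=lambda a: print("hello"))
#   args = parser.parse_args(["hello"])
#   args.func(args)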
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
UpperCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCAmelCase = model_type_to_module_name(lowercase )
_UpperCAmelCase = importlib.import_module(f'''.{module_name}''' ,"""transformers.models""" )
try:
return getattr(lowercase ,lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase ,"""__name__""" ,lowercase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCAmelCase = importlib.import_module("""transformers""" )
if hasattr(lowercase ,lowercase ):
return getattr(lowercase ,lowercase )
return None
def __UpperCAmelCase ( lowercase ,lowercase = None ,lowercase = False ,lowercase = False ,lowercase = None ,lowercase = None ,lowercase = None ,lowercase = False ,**lowercase ,):
"""simple docstring"""
_UpperCAmelCase = get_file_from_repo(
lowercase ,lowercase ,cache_dir=lowercase ,force_download=lowercase ,resume_download=lowercase ,proxies=lowercase ,use_auth_token=lowercase ,revision=lowercase ,local_files_only=lowercase ,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(lowercase ,encoding="""utf-8""" ) as reader:
return json.load(lowercase )
class a :
def __init__( self : Tuple ):
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(__UpperCAmelCase )
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Dict ):
_UpperCAmelCase = kwargs.pop("""config""" , __UpperCAmelCase )
_UpperCAmelCase = kwargs.pop("""trust_remote_code""" , __UpperCAmelCase )
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(__UpperCAmelCase , **__UpperCAmelCase )
_UpperCAmelCase = config_dict.get("""feature_extractor_type""" , __UpperCAmelCase )
_UpperCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_UpperCAmelCase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
            # It could be in `config.feature_extractor_type`
_UpperCAmelCase = getattr(__UpperCAmelCase , """feature_extractor_type""" , __UpperCAmelCase )
if hasattr(__UpperCAmelCase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_UpperCAmelCase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_UpperCAmelCase = feature_extractor_class_from_name(__UpperCAmelCase )
_UpperCAmelCase = feature_extractor_auto_map is not None
_UpperCAmelCase = feature_extractor_class is not None or type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
_UpperCAmelCase = resolve_trust_remote_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if has_remote_code and trust_remote_code:
_UpperCAmelCase = get_class_from_dynamic_module(
__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
_UpperCAmelCase = kwargs.pop("""code_revision""" , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
_UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(__UpperCAmelCase )]
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
FEATURE_EXTRACTOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
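# Hedged usage sketch (checkpoint assumed to exist on the Hub): the resolution
# order above is (1) the feature extractor config, (2) the model config,
# (3) the FEATURE_EXTRACTOR_MAPPING fallback.
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")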
| 354
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ):
"""simple docstring"""
_UpperCAmelCase = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_UpperCAmelCase = True, True
_UpperCAmelCase = dfs(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
return path
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = -1
for i in range(snake_case_ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_UpperCAmelCase = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
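# The helper above applies Euler's theorem for connected undirected graphs:
#   0 odd-degree vertices -> an Euler circuit exists (check code 1)
#   2 odd-degree vertices -> an Euler path exists, starting at an odd vertex (check code 2)
#   any other count       -> neither exists (check code 3)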
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_UpperCAmelCase = check_circuit_or_path(snake_case_ ,snake_case_ )
if check == 3:
print("""graph is not Eulerian""" )
print("""no path""" )
return
_UpperCAmelCase = 1
if check == 2:
_UpperCAmelCase = odd_node
print("""graph has a Euler path""" )
if check == 1:
print("""graph has a Euler cycle""" )
_UpperCAmelCase = dfs(snake_case_ ,snake_case_ ,snake_case_ )
print(snake_case_ )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_UpperCAmelCase = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_UpperCAmelCase = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_UpperCAmelCase = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_UpperCAmelCase = {
1: [],
2: []
        # all degrees are zero
}
_UpperCAmelCase = 10
check_euler(snake_case_ ,snake_case_ )
check_euler(snake_case_ ,snake_case_ )
check_euler(snake_case_ ,snake_case_ )
check_euler(snake_case_ ,snake_case_ )
check_euler(snake_case_ ,snake_case_ )
if __name__ == "__main__":
main()
| 355
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
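# Hedged note: tweepy's Cursor provides equivalent pagination to the manual
# max_id loop above, e.g. (assuming a tweepy 3.x-style API object):
#
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items():
#       ...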
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase__ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase__ = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
UpperCAmelCase__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class a ( __UpperCamelCase ):
_snake_case : Any = VOCAB_FILES_NAMES
_snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Tuple = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = MBartTokenizer
_snake_case : List[int] = []
_snake_case : List[int] = []
def __init__( self : List[Any] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : int="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : int="<unk>" , __lowerCAmelCase : str="<pad>" , __lowerCAmelCase : List[Any]="<mask>" , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_UpperCAmelCase = {
lang_code: self.convert_tokens_to_ids(__lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCAmelCase = src_lang if src_lang is not None else """en_XX"""
_UpperCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str ):
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = self.convert_tokens_to_ids(__lowerCAmelCase )
_UpperCAmelCase = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en_XX" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro_RO" , **__lowerCAmelCase : Any , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self : str ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.convert_tokens_to_ids(__lowerCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : str ):
_UpperCAmelCase = self.convert_tokens_to_ids(__lowerCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file , __lowerCAmelCase )
return (out_vocab_file,)
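# Hedged usage sketch (upstream class name and checkpoint assumed): the
# language codes above become suffix special tokens, selected per batch via
# src_lang / tgt_lang.
#
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")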
| 356
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class a ( folder_based_builder.FolderBasedBuilderConfig ):
_snake_case : bool = None
_snake_case : bool = None
class a ( folder_based_builder.FolderBasedBuilder ):
_snake_case : List[Any] = datasets.Audio()
_snake_case : str = 'audio'
_snake_case : Union[str, Any] = AudioFolderConfig
_snake_case : List[str] # definition at the bottom of the script
_snake_case : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )
UpperCAmelCase__ = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
UpperCAmelCase__ = AUDIO_EXTENSIONS
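# Hedged usage sketch (local path assumed): folder-based builders like this one
# are loaded by name, with labels inferred from sub-directory names.
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")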
| 357
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
        # Generate dummy inputs according to the computed batch and sequence lengths
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ): # noqa: E741
"""simple docstring"""
while r - l > 1:
_UpperCAmelCase = (l + r) // 2
if v[m] >= key:
_UpperCAmelCase = m
else:
_UpperCAmelCase = m # noqa: E741
return r
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if len(_UpperCamelCase ) == 0:
return 0
_UpperCAmelCase = [0] * len(_UpperCamelCase )
_UpperCAmelCase = 1
_UpperCAmelCase = v[0]
for i in range(1 ,len(_UpperCamelCase ) ):
if v[i] < tail[0]:
_UpperCAmelCase = v[i]
elif v[i] > tail[length - 1]:
_UpperCAmelCase = v[i]
length += 1
else:
_UpperCAmelCase = v[i]
return length
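# The two functions above implement the O(n log n) "tails" method for the
# longest strictly increasing subsequence: tail[k] keeps the smallest possible
# tail value of an increasing subsequence of length k + 1, maintained with the
# binary search helper. e.g. [2, 5, 3, 7, 11, 8, 10, 13, 6] -> 6
# (one witness: 2, 3, 7, 8, 10, 13).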
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."}, )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"}, )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(string):
    """simple docstring"""
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    """simple docstring"""
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    """simple docstring"""
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """simple docstring"""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """simple docstring"""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    """simple docstring"""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
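# --- Illustrative sketch (not part of the original training script) ---
# decode_fn above expects TFRecords holding int64 "input_ids" and "attention_mask"
# features of length max_length. A matching record could be produced like this:
def _write_dummy_tfrecord(path, max_length=512):
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[0] * max_length)),
                "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[1] * max_length)),
            }
        )
    )
    with tf.io.TFRecordWriter(path) as writer:
        writer.write(example.SerializeToString())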
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 30
| 0
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
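# --- Minimal usage sketch (assumes the class names reconstructed above) ---
# The blocks operate on NHWC arrays; upsampling doubles both spatial dimensions.
if __name__ == "__main__":
    upsample = FlaxUpsample2D(out_channels=8)
    x = jnp.ones((1, 4, 4, 8))
    params = upsample.init(jax.random.PRNGKey(0), x)
    y = upsample.apply(params, x)
    assert y.shape == (1, 8, 8, 8)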
| 360
|
"""simple docstring"""
import string
from math import log10


def term_frequency(term, document):
    """simple docstring"""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term, corpus):
    """simple docstring"""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df, n, smoothing=False):
    """simple docstring"""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf, idf):
    """simple docstring"""
    return round(tf * idf, 3)
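# --- Worked example (not from the original file) ---
# term_frequency counts raw occurrences; document_frequency works over
# newline-separated documents, and idf here is the rounded log10(n / df):
if __name__ == "__main__":
    corpus = "this is the first document\nthis document is the second document"
    tf = term_frequency("document", "this document is the second document")  # 2
    df, n = document_frequency("document", corpus)  # df = 2, n = 2
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 2 * log10(2/2) = 0.0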
| 30
| 0
|
"""simple docstring"""
def is_power_of_two(number):
    """simple docstring"""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
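# --- Illustrative note (not from the original file) ---
# n & (n - 1) clears the lowest set bit, so it is 0 exactly for powers of two
# (and for 0, which this implementation also reports as True):
if __name__ == "__main__":
    print([n for n in range(20) if is_power_of_two(n)])  # [0, 1, 2, 4, 8, 16]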
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30
| 0
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """simple docstring"""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
UpperCAmelCase__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 362
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
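# --- Cross-check sketch (not from the original file) ---
# The same product can be computed with numpy's FFT, which is a convenient way
# to validate the hand-rolled butterfly passes above:
def _multiply_with_numpy(poly_a, poly_b):
    size = len(poly_a) + len(poly_b) - 1
    fa = np.fft.fft(poly_a, n=size)
    fb = np.fft.fft(poly_b, n=size)
    return np.round(np.fft.ifft(fa * fb).real, 8)


# _multiply_with_numpy([1, 2], [3, 4]) -> [3., 10., 8.], i.e. (1 + 2x)(3 + 4x)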
| 30
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 363
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
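# --- Minimal usage sketch (assumes this module is transformers' UperNet config) ---
if __name__ == "__main__":
    cfg = UperNetConfig()  # falls back to the default ResNet backbone
    assert cfg.to_dict()["backbone_config"]["model_type"] == "resnet"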
| 30
| 0
|
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
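# --- Usage sketch (not from the original file) ---
if __name__ == "__main__":
    tree = FenwickTree([1, 2, 3, 4, 5])
    print(tree.prefix(3))    # 1 + 2 + 3 = 6
    tree.add(0, 10)          # point update at index 0
    print(tree.query(0, 2))  # (1 + 10) + 2 = 13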
| 364
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number, dice_number):
    """simple docstring"""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution():
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
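# --- Monte Carlo sanity check (not from the original file) ---
# Project Euler 205: Peter rolls nine 4-sided dice, Colin six 6-sided dice.
def _simulate(rounds=100_000, seed=0):
    import random

    rng = random.Random(seed)
    wins = 0
    for _ in range(rounds):
        peter = sum(rng.randint(1, 4) for _ in range(9))
        colin = sum(rng.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / rounds


# _simulate() lands near 0.573, close to the exact value 0.5731441 from solution().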
| 30
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
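# --- Illustrative note (not from the original file) ---
# _LazyModule replaces this module in sys.modules, so heavy torch imports only
# happen the first time a symbol is accessed. A stripped-down version of the idea:
class _LazyDemo:
    def __init__(self, import_structure):
        self._import_structure = import_structure

    def __getattr__(self, name):
        import importlib

        for module_name, symbols in self._import_structure.items():
            if name in symbols:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)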
| 365
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 30
| 0
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
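# --- Direct sklearn equivalent (not from the original file) ---
# The metric above is a thin wrapper; the same number falls out of sklearn:
if __name__ == "__main__":
    print(recall_score([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]))  # 0.666... (2 of 3 positives found)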
| 366
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 30
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
"""simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
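        # Greedily merge the adjacent pair with the lowest merge rank until no
        # learned merge applies anymore.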
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 367
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
"""simple docstring"""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size(exception):
"""simple docstring"""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
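    # Each time the wrapped function fails with an out-of-memory style error
    # (see should_reduce_batch_size), retry with the batch size halved.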
    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
| 30
| 0
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}
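        # Maps a placeholder token to the list of sub-tokens that were added
        # for it, e.g. "<token>" -> ["<token>_0", "<token>_1", ...].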
    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer.")

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
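# Fitness function: count how many characters of `item` already match the
# target string at the same position.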
def evaluate(item, main_target):
    """simple docstring"""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
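# Single-point crossover: the children swap the parents' tails at a random
# slice index.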
def crossover(parent_1, parent_2):
    """simple docstring"""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
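# Mutation: with probability MUTATION_PROBABILITY, replace one random
# character of the child with a random gene.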
def mutate(child, genes):
    """simple docstring"""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1, population_score, genes):
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target, genes, debug=True):
    """simple docstring"""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """simple docstring"""
    return choice(lst)
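# Quickselect: expected O(n) selection of the k-th smallest element, e.g.
# kth_number([2, 1, 3, 4, 5], k=3) returns 3.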
def kth_number(lst, k):
    """simple docstring"""
    pivot = random_pivot(lst)
# partition based on pivot
# linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370
|
"""simple docstring"""
def solution(power=1000):
    """simple docstring"""
    n = 2**power
    r = 0
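    # Strip the last decimal digit of n on every pass and add it to r, i.e.
    # compute the sum of the digits of 2**power.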
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCAmelCase__ = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCAmelCase__ = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
UpperCAmelCase__ = """|""".join(sys.argv[1:])
UpperCAmelCase__ = re.compile(rF'''^({joined_dirs}).*?\.py$''')
UpperCAmelCase__ = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 371
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
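    # prediction_step below optionally decodes with generate() and pads both
    # predictions and labels to the same max_length so that they can be
    # concatenated across evaluation batches.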
    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ):
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 30
| 0
|
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
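# Each fixture below writes a small CSV file into pytest's tmp_path and
# returns its path as a string.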
@pytest.fixture
def csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """simple docstring"""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """simple docstring"""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """simple docstring"""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """simple docstring"""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """simple docstring"""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """simple docstring"""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
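# Illustrative sketch (not part of the conversion script): the function above
# slices timm's fused qkv projection, shape (3 * hidden, hidden), into equal
# query/key/value chunks. A toy, self-contained version; names are
# hypothetical.
def _demo_split_qkv(hidden_size=4):
    import torch

    fused_weight = torch.randn(3 * hidden_size, hidden_size)
    query_w = fused_weight[:hidden_size, :]
    key_w = fused_weight[hidden_size : 2 * hidden_size, :]
    value_w = fused_weight[-hidden_size:, :]
    assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
    return query_w, key_w, value_w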
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase__ = """\
"""
UpperCAmelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCAmelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] = 16 , __lowerCAmelCase : List[Any] = True , __lowerCAmelCase : str=None ):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_UpperCAmelCase = """cuda"""
else:
_UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = model.to(__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__lowerCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""pt""" , return_attention_mask=__lowerCAmelCase , ).to(__lowerCAmelCase )
_UpperCAmelCase = encodings["""input_ids"""]
_UpperCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ) ):
_UpperCAmelCase = min(start_index + batch_size , len(__lowerCAmelCase ) )
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_UpperCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__lowerCAmelCase ), attn_mask] , dim=1 )
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
_UpperCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __lowerCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__lowerCAmelCase )}
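# Illustrative sketch (not part of the metric): perplexity is the
# exponentiated mean negative log-likelihood, computed here by hand for one
# sequence of random logits, mirroring the shift-by-one logic above. Toy
# shapes; all names are hypothetical.
def _demo_perplexity():
    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size, seq_len = 10, 6
    logits = torch.randn(1, seq_len, vocab_size)
    labels = torch.randint(0, vocab_size, (1, seq_len))
    # logits at position i score the token at position i + 1
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    nll = CrossEntropyLoss(reduction="mean")(shift_logits.transpose(1, 2), shift_labels)
    return torch.exp(nll).item()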
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCAmelCase__ = """base_with_context"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = ly_weight["""attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
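# Illustrative sketch (not part of the conversion script): the repeated `.T`
# above reflects that Flax/T5X dense kernels are stored as (in_features,
# out_features) while torch.nn.Linear.weight is (out_features, in_features).
# A minimal round trip; names are hypothetical.
def _demo_kernel_transpose():
    import numpy as np
    import torch

    flax_kernel = np.random.randn(8, 16).astype(np.float32)  # (in, out)
    linear = torch.nn.Linear(8, 16, bias=False)
    linear.weight = torch.nn.Parameter(torch.tensor(flax_kernel.T))
    x = torch.randn(1, 8)
    expected = x.numpy() @ flax_kernel
    return np.allclose(linear(x).detach().numpy(), expected, atol=1e-5)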
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = ly_weight["""attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_UpperCAmelCase = weights[f'''layers_{lyr_num}''']
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_UpperCAmelCase = ly_weight["""self_attention"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = ly_weight["""MultiHeadDotProductAttention_0"""]
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_UpperCAmelCase = jnp.tree_util.tree_map(onp.array ,_lowerCAmelCase )
_UpperCAmelCase = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
_UpperCAmelCase = os.path.join(args.checkpoint_path ,"""..""" ,"""config.gin""" )
_UpperCAmelCase = inference.parse_training_gin_file(_lowerCAmelCase ,_lowerCAmelCase )
_UpperCAmelCase = inference.InferenceModel(args.checkpoint_path ,_lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ,variance_type="""fixed_large""" )
_UpperCAmelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] ,vocab_size=synth_model.model.module.config.vocab_size ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="""gated-gelu""" ,)
_UpperCAmelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims ,targets_context_length=synth_model.sequence_length["""targets_context"""] ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="""gated-gelu""" ,)
_UpperCAmelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims ,targets_length=synth_model.sequence_length["""targets_context"""] ,max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,d_model=synth_model.model.module.config.emb_dim ,num_layers=synth_model.model.module.config.num_decoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,)
_UpperCAmelCase = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] ,_lowerCAmelCase )
_UpperCAmelCase = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] ,_lowerCAmelCase )
_UpperCAmelCase = load_decoder(ta_checkpoint["""target"""]["""decoder"""] ,_lowerCAmelCase )
_UpperCAmelCase = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
_UpperCAmelCase = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCAmelCase ,continuous_encoder=_lowerCAmelCase ,decoder=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,melgan=_lowerCAmelCase ,)
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
UpperCAmelCase__ = parser.parse_args()
main(args)
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
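# Illustrative example (not part of the original file): str.split() with no
# separator collapses runs of whitespace, so the reversal also normalises
# spacing. The function name below is hypothetical.
def _demo_reverse_words():
    reversed_words = " ".join("hello   big  world".split()[::-1])
    assert reversed_words == "world big hello"
    return reversed_words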
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
from collections.abc import Callable
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = a
_UpperCAmelCase = b
if function(lowercase ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
    ):  # if neither endpoint is a root and the function values at both ends
        # share the same sign, this algorithm can't find a root in the interval
raise ValueError("""could not find root in given interval.""" )
else:
_UpperCAmelCase = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until the interval width drops below 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
_UpperCAmelCase = mid
else:
_UpperCAmelCase = mid
_UpperCAmelCase = start + (end - start) / 2.0
return mid
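# Illustrative sketch (not part of the original file): the loop above halves
# [start, end] while keeping endpoints of opposite sign, so the midpoint
# converges to a root. A toy version with a known root at x = 2; names are
# hypothetical.
def _demo_bisection():
    def f(x):
        return x * x - 4  # root at x = 2

    start, end = 0.0, 5.0
    while end - start > 10**-7:
        mid = (start + end) / 2
        if f(start) * f(mid) <= 0:
            end = mid
        else:
            start = mid
    return (start + end) / 2  # ~2.0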
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
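# Illustrative sketch (not part of the original script): the same max_id
# pagination pattern as above, run against an in-memory fake timeline so it
# works without Twitter credentials. Names are hypothetical.
def _demo_max_id_pagination():
    timeline = list(range(1000, 0, -1))  # tweet ids, newest first

    def fetch(count, max_id=None):
        pool = [i for i in timeline if max_id is None or i <= max_id]
        return pool[:count]

    collected = fetch(200)
    while True:
        batch = fetch(200, max_id=collected[-1] - 1)
        if not batch:
            break
        collected.extend(batch)
    return len(collected) == len(timeline)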
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCAmelCase , unittest.TestCase ):
_snake_case : Tuple = PhobertTokenizer
_snake_case : List[Any] = False
def lowerCAmelCase_ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
_UpperCAmelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''l à</w>''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
def lowerCAmelCase_ ( self : str , **__lowerCAmelCase : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = '''Tôi là VinAI Research'''
_UpperCAmelCase = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''Tôi là VinAI Research'''
_UpperCAmelCase = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
_UpperCAmelCase = tokenizer.tokenize(lowerCamelCase__ )
print(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
| 356
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ ,A__ )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(A__ ,A__ ,bias=A__ )
_UpperCAmelCase = emb.weight.data
return lin_layer
def __UpperCAmelCase ( lowercase ,lowercase=None ):
"""simple docstring"""
_UpperCAmelCase = {}
for old_key in state_dict.keys():
_UpperCAmelCase = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_UpperCAmelCase = key.replace("""moe_layer.experts.0""" ,f'''ffn.experts.expert_{expert_idx}''' )
else:
_UpperCAmelCase = key.replace("""moe_layer.experts.""" ,"""ffn.experts.expert_""" )
if "gate" in key:
_UpperCAmelCase = key.replace(""".moe_layer.gate.wg""" ,""".ffn.router.classifier""" )
        if "fc2" in key and "experts" not in key:
            _UpperCAmelCase = key.replace(""".fc2.""" ,""".ffn.fc2.""" )
        if "fc1" in key and "experts" not in key:
            _UpperCAmelCase = key.replace(""".fc1.""" ,""".ffn.fc1.""" )
if ".encoder_attn." in key:
_UpperCAmelCase = key.replace(""".encoder_attn.""" ,""".cross_attention.""" )
if "encoder_attn_layer_norm" in key:
_UpperCAmelCase = key.replace("""encoder_attn_layer_norm""" ,"""cross_attention_layer_norm""" )
if "final_layer_norm" in key:
_UpperCAmelCase = key.replace("""final_layer_norm""" ,"""ff_layer_norm""" )
_UpperCAmelCase = state_dict[old_key]
return new_dict
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase = WEIGHTS_NAME ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = 0
os.makedirs(A__ ,exist_ok=A__ )
for expert in range(A__ ):
_UpperCAmelCase = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(A__ ):
_UpperCAmelCase = torch.load(A__ )["""model"""]
remove_ignore_keys_(A__ )
_UpperCAmelCase = rename_fairseq_keys(A__ ,A__ )
_UpperCAmelCase = os.path.join(
A__ ,weights_name.replace(""".bin""" ,f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
torch.save(A__ ,A__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(A__ )[0]].dtype )
# Add the last block
_UpperCAmelCase = os.path.join(A__ ,weights_name.replace(""".bin""" ,f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
_UpperCAmelCase = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(A__ )
_UpperCAmelCase = rename_fairseq_keys(A__ ,A__ )
_UpperCAmelCase = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model / experts saved in the same file)
if len(A__ ) == 1:
_UpperCAmelCase = os.path.join(A__ ,A__ )
torch.save(A__ ,A__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(A__ ,A__ )
# Otherwise, let's build the index
_UpperCAmelCase = {}
for idx, shard in enumerate(A__ ):
_UpperCAmelCase = weights_name.replace(""".bin""" ,f'''-{idx+1:05d}-of-{len(A__ ):05d}.bin''' )
_UpperCAmelCase = os.path.join(A__ ,weights_name.replace(""".bin""" ,f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A__ ,os.path.join(A__ ,A__ ) )
for key in shard:
_UpperCAmelCase = shard_file
# Add the metadata
_UpperCAmelCase = {"""total_size""": total_size}
_UpperCAmelCase = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(A__ ,A__ ) ,"""w""" ,encoding="""utf-8""" ) as f:
_UpperCAmelCase = json.dumps(A__ ,indent=2 ,sort_keys=A__ ) + """\n"""
f.write(A__ )
return metadata, index
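# Illustrative sketch (not part of the conversion script): the index written
# above follows the sharded-checkpoint convention: a JSON object with
# "metadata" (total parameter byte size) and "weight_map" (parameter name to
# shard file). Toy structure only; file names are hypothetical.
def _demo_shard_index():
    import json

    index = {
        "metadata": {"total_size": 1024},
        "weight_map": {
            "encoder.layers.0.weight": "pytorch_model-00001-of-00002.bin",
            "decoder.layers.0.weight": "pytorch_model-00002-of-00002.bin",
        },
    }
    return json.dumps(index, indent=2, sort_keys=True)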
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ , UpperCAmelCase__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
UpperCAmelCase__ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 357
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
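# Illustrative sketch (not part of the config): LayoutLM-style boxes are
# (x0, y0, x1, y1) coordinates normalised to a 0-1000 grid relative to the
# page size, which is why the dummy box above stays within that range. A toy
# normaliser assuming pixel inputs; names and defaults are hypothetical.
def _demo_normalize_box(box=(120, 210, 365, 320), width=1224, height=1584):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]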
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (H bar) and the speed of light C used
# in the Casimir formula below; pi is imported from the math module above.
UpperCAmelCase__ = 1.0_5457_1817E-34 # unit of ℏ : J * s
UpperCAmelCase__ = 3E8 # unit of c : m * s^-1
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
_UpperCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_40 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_UpperCAmelCase = (2_40 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_UpperCAmelCase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
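# Illustrative sketch (not part of the original file): the branches above are
# rearrangements of the Casimir formula F = (h_bar * c * pi^2 * A) / (240 *
# d^4), so solving for force and then inverting for area must round-trip.
# Arbitrary toy values; the function name is hypothetical.
def _demo_casimir_round_trip():
    from math import pi

    h_bar, c = 1.054571817e-34, 3e8
    area, distance = 4e-4, 1e-6  # m^2, m
    force = (h_bar * c * pi**2 * area) / (240 * distance**4)
    recovered_area = (240 * force * distance**4) / (h_bar * c * pi**2)
    return abs(recovered_area - area) / area < 1e-9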
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
UpperCAmelCase__ = 6_3_7_8_1_3_7.0
UpperCAmelCase__ = 6_3_5_6_7_5_2.3_1_4_2_4_5
UpperCAmelCase__ = 6_3_7_8_1_3_7
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_UpperCAmelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCAmelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
# Compute central angle between two points
    # using the haversine formula: sigma = haversine_distance / equatorial radius
_UpperCAmelCase = haversine_distance(a__ ,a__ ,a__ ,a__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_UpperCAmelCase = (b_lata + b_lata) / 2
_UpperCAmelCase = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_UpperCAmelCase = (sin(a__ ) ** 2) * (cos(a__ ) ** 2)
_UpperCAmelCase = cos(sigma / 2 ) ** 2
_UpperCAmelCase = (sigma - sin(a__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_UpperCAmelCase = (cos(a__ ) ** 2) * (sin(a__ ) ** 2)
_UpperCAmelCase = sin(sigma / 2 ) ** 2
_UpperCAmelCase = (sigma + sin(a__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
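# Illustrative sketch (not part of the original file): the parametric
# (reduced) latitude used above maps geodetic latitude phi to
# beta = atan((1 - f) * tan(phi)), pulling points slightly toward the
# equator. Self-contained check with WGS84 semi-axes; names are hypothetical.
def _demo_parametric_latitude():
    from math import atan, radians, tan

    axis_a, axis_b = 6378137.0, 6356752.314245  # WGS84 semi-major/minor axes
    flattening = (axis_a - axis_b) / axis_a
    beta_45 = atan((1 - flattening) * tan(radians(45.0)))
    return beta_45 < radians(45.0)  # reduced latitude is smaller than geodetic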
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transfromers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
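# What the metric regexes above extract: SageMaker scans the training log with each "Regex"
# pattern and reports capture group 1 as the metric value. Standalone check against a
# synthetic PyTorch-style log line (the log format is an assumption for illustration):
import re

assert re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 123.45").group(1) == "123.45"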
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = namedtuple("""result""" ,"""name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" ,power / current )
elif current == 0:
return result("""current""" ,power / voltage )
elif power == 0:
return result("""power""" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
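# Call sketch for the solver above (positional order: voltage, current, power; exactly one of
# the three must be 0, and the namedtuple names the quantity that was solved for).
# "electric_power" is an assumed readable alias for the obfuscated def above.
#
#     electric_power(0, 2, 5)    # -> result(name='voltage', value=2.5)
#     electric_power(2, 0, 5)    # -> result(name='current', value=2.5)
#     electric_power(2, 2.5, 0)  # -> result(name='power', value=5.0)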
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
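# Worked example combining the helpers above (readable names are assumptions for the
# obfuscated defs). With a term frequency of 2 and a corpus where the term appears in 1 of 3
# documents: idf = round(log10(3 / 1), 3) = 0.477 and tf-idf = round(2 * 0.477, 3) = 0.954.
from math import log10


def _tf_idf_sketch(tf: int = 2, n_docs: int = 3, df: int = 1) -> float:
    idf = round(log10(n_docs / df), 3)
    return round(tf * idf, 3)  # 0.954 for the defaults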
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase ,np.ndarray ):
return list(tensor.shape )
_UpperCAmelCase = tf.shape(__lowerCAmelCase )
if tensor.shape == tf.TensorShape(__lowerCAmelCase ):
return dynamic
_UpperCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCAmelCase )]
def __UpperCAmelCase ( lowercase ,lowercase = None ,lowercase = None ):
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 ,axis=__lowerCAmelCase ,name=__lowerCAmelCase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=1E-5 ,lowercase=-1 ):
"""simple docstring"""
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
_UpperCAmelCase = tf.nn.moments(__lowerCAmelCase ,axes=[axis] ,keepdims=__lowerCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_UpperCAmelCase = [1] * inputs.shape.rank
_UpperCAmelCase = shape_list(__lowerCAmelCase )[axis]
_UpperCAmelCase = tf.reshape(__lowerCAmelCase ,__lowerCAmelCase )
_UpperCAmelCase = tf.reshape(__lowerCAmelCase ,__lowerCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
_UpperCAmelCase = tf.nn.batch_normalization(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,offset=__lowerCAmelCase ,scale=__lowerCAmelCase ,variance_epsilon=__lowerCAmelCase ,)
return outputs
def __UpperCAmelCase ( lowercase ,lowercase=0 ,lowercase=-1 ):
"""simple docstring"""
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_UpperCAmelCase = tf.shape(__lowerCAmelCase )
_UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 )
return tf.reshape(__lowerCAmelCase ,__lowerCAmelCase )
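# Shape sanity-check for the torch.flatten replica above: collapsing dims 1..2 of a
# (2, 3, 4, 5) tensor must yield (2, 12, 5). Standalone sketch (names are illustrative):
def _flatten_shape_sketch() -> tf.TensorShape:
    x = tf.zeros((2, 3, 4, 5))
    in_shape = tf.shape(x)
    new_shape = tf.concat([in_shape[:1], [3 * 4], in_shape[3:]], axis=0)
    return tf.reshape(x, new_shape).shape  # TensorShape([2, 12, 5])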
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase ,tf.Tensor ):
_UpperCAmelCase = tf.convert_to_tensor(__lowerCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_UpperCAmelCase = (
tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
__lowerCAmelCase ,tf.cast(__lowerCAmelCase ,dtype=tensor.dtype ) ,message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCAmelCase )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) ,)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_UpperCAmelCase = [x for x in data if len(__lowerCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
_UpperCAmelCase = np.asarray(__lowerCAmelCase )
_UpperCAmelCase = 1
_UpperCAmelCase = np.array_split(__lowerCAmelCase ,__lowerCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_UpperCAmelCase = np.array_split(__lowerCAmelCase ,__lowerCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCAmelCase ):
_UpperCAmelCase = chunk_data
else:
_UpperCAmelCase = data
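# How the HDF5 attribute chunking above behaves: the array is split into progressively more
# chunks until every chunk fits under HDF5_OBJECT_HEADER_LIMIT bytes. Standalone sketch with
# a tiny illustrative limit (the real limit above is 64512 bytes):
def _chunking_sketch(limit: int = 8) -> int:
    data = np.asarray([b"aaaa"] * 6)  # dtype S4, 24 bytes total
    num_chunks = 1
    chunked_data = np.array_split(data, num_chunks)
    while any(chunk.nbytes > limit for chunk in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data, num_chunks)
    return num_chunks  # 3: three 2-element chunks of 8 bytes each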
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if name in group.attrs:
_UpperCAmelCase = [n.decode("""utf8""" ) if hasattr(__lowerCAmelCase ,"""decode""" ) else n for n in group.attrs[name]]
else:
_UpperCAmelCase = []
_UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(__lowerCAmelCase ,"""decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
def _expand_single_ad_tensor(lowercase ):
if isinstance(__lowerCAmelCase ,tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCAmelCase ,axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor ,__lowerCAmelCase )
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert (
                    param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape
                ), "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
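# The core move in the converter above: old checkpoints fuse query/key/value into a single
# in_proj weight of shape (3 * embed_dim, embed_dim), which is sliced into thirds for the
# new per-projection parameters. Standalone sketch of the slicing (names are illustrative):
import torch


def _split_in_proj_sketch(in_proj_weight: torch.Tensor) -> tuple:
    embed_dim = in_proj_weight.shape[0] // 3
    query_w = in_proj_weight[:embed_dim, :]
    key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value_w = in_proj_weight[2 * embed_dim :, :]
    return query_w, key_w, value_w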
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase__ = '<<<<<<< This should probably be modified because it mentions: '
UpperCAmelCase__ = '=======\n>>>>>>>\n'
UpperCAmelCase__ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
UpperCAmelCase__ = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
    (R'tf\.([\w\d]+)', R"datasets.Value('\1')"),
    (R'tfds\.features\.Text\(\)', R"datasets.Value('string')"),
    (R'tfds\.features\.Text\(', R"datasets.Value('string'),"),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return ConvertCommand(args.tfds_path ,args.datasets_directory )
class a ( _A ):
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : ArgumentParser ):
_UpperCAmelCase = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : List[Any] ):
_UpperCAmelCase = get_logger("""datasets-cli/converting""" )
_UpperCAmelCase = tfds_path
_UpperCAmelCase = datasets_directory
def lowerCAmelCase_ ( self : str ):
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_UpperCAmelCase = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
_UpperCAmelCase = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.listdir(__SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
_UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = []
for line in lines:
_UpperCAmelCase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase = """"""
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase = """from datasets import logging\n"""
elif "getLogger" in out_line:
_UpperCAmelCase = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase = True
_UpperCAmelCase = list(filter(lambda __lowerCAmelCase : e in out_line , __SCREAMING_SNAKE_CASE ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + """\n""" )
out_lines.append(__SCREAMING_SNAKE_CASE )
out_lines.append(__SCREAMING_SNAKE_CASE )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , __SCREAMING_SNAKE_CASE )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
_UpperCAmelCase = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase = True
out_lines.append(__SCREAMING_SNAKE_CASE )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase = f_name.replace(""".py""" , """""" )
_UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__SCREAMING_SNAKE_CASE )
if needs_manual_update:
with_manual_update.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
_UpperCAmelCase = os.path.basename(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
                self._logger.info(f'''Moving {utils_file} to {dest_folder}''' )
shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
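# How the (pattern, replacement) table above is applied: each source line is passed through
# re.sub for every pair, in order — which is why the specific FeaturesDict/Text patterns must
# come before the catch-all `tfds.` rewrite. Standalone illustration on one tfds-style line:
def _convert_line_sketch() -> str:
    line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
    for pattern, replacement in TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    return line  # "features=datasets.Features({'text': datasets.Value('string')})"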
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
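# Reference check for the FFT multiplier above: coefficient lists are in ascending-power
# order, so (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, and the rounded real parts stored in
# `product` should match the plain convolution of the coefficient lists:
#
#     np.convolve([1, 2], [3, 4])  # array([ 3, 10,  8])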
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class a ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **__lowerCAmelCase : List[str] ):
super().__init__(**_SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : Dict , **__lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
_UpperCAmelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
_UpperCAmelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
_UpperCAmelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
_UpperCAmelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
_UpperCAmelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
_UpperCAmelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
_UpperCAmelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
_UpperCAmelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
_UpperCAmelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
_UpperCAmelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
_UpperCAmelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
_UpperCAmelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : int , __lowerCAmelCase : int , *__lowerCAmelCase : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str ):
return super().__call__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=64 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : float = 512 / 1500 , __lowerCAmelCase : Optional[int] = 32 , __lowerCAmelCase : Optional[int] = 1 , ):
_UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processor.size["longest_edge"]
_UpperCAmelCase = self.image_processor.generate_crop_boxes(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_UpperCAmelCase = self.get_inference_context()
with inference_context():
_UpperCAmelCase = self._ensure_tensor_on_device(_SCREAMING_SNAKE_CASE , device=self.device )
_UpperCAmelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_UpperCAmelCase = image_embeddings
_UpperCAmelCase = grid_points.shape[1]
_UpperCAmelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = grid_points[:, i : i + points_per_batch, :, :]
_UpperCAmelCase = input_labels[:, i : i + points_per_batch]
_UpperCAmelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int]=0.88 , __lowerCAmelCase : int=0.95 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[str]=1 , ):
_UpperCAmelCase = model_inputs.pop("""input_boxes""" )
_UpperCAmelCase = model_inputs.pop("""is_last""" )
_UpperCAmelCase = model_inputs.pop("""original_sizes""" ).tolist()
_UpperCAmelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_UpperCAmelCase = self.model(**_SCREAMING_SNAKE_CASE )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_UpperCAmelCase = model_outputs["pred_masks"]
_UpperCAmelCase = self.image_processor.post_process_masks(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , binarize=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model_outputs["iou_scores"]
_UpperCAmelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Union[str, Any]=0.7 , ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_UpperCAmelCase = torch.cat(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.cat(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processor.post_process_for_mask_generation(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = defaultdict(_SCREAMING_SNAKE_CASE )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {}
if output_rle_mask:
_UpperCAmelCase = rle_mask
if output_bboxes_mask:
_UpperCAmelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
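# Construction sketch for the config above (its class name is obfuscated; "UperNetConfig" is
# the upstream name, and the keyword names shown follow the upstream signature, so this is
# illustrative rather than directly runnable against the obfuscated code): with no
# backbone_config it falls back to ResNet with four output stages, and a dict carrying a
# "model_type" key swaps in any backbone registered in CONFIG_MAPPING.
#
#     config = UperNetConfig()                                         # ResNet backbone
#     config = UperNetConfig(backbone_config={"model_type": "convnext"})
#     assert config.backbone_config.model_type == "convnext"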
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a ( __snake_case ):
_snake_case : int = ["image_processor", "tokenizer"]
_snake_case : int = "AutoImageProcessor"
_snake_case : Optional[Any] = "AutoTokenizer"
def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ):
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
_UpperCAmelCase = self.image_processor
def __call__( self : Tuple , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Union[str, Any] ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_UpperCAmelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
_UpperCAmelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def lowerCAmelCase_ ( self : int , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : int ):
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCAmelCase_ ( self : Tuple , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[Any] ):
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return ["input_ids", "attention_mask", "pixel_values"]
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
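# Sanity check of the frequency helper above, restated standalone for two six-sided dice:
# totals 2..12 occur with frequencies [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1].
def _two_dice_frequencies_sketch() -> list:
    freq = [0] * (2 * 6 + 1)
    for roll in product(range(1, 7), repeat=2):
        freq[sum(roll)] += 1
    return freq[2:]  # [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]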
if __name__ == "__main__":
print(F'''{solution() = }''')
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase=True ,lowercase="pt" ):
"""simple docstring"""
_UpperCAmelCase = {"""add_prefix_space""": True} if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and not line.startswith(""" """ ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] ,max_length=SCREAMING_SNAKE_CASE_ ,padding="""max_length""" if pad_to_max_length else None ,truncation=SCREAMING_SNAKE_CASE_ ,return_tensors=SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=None ,):
"""simple docstring"""
_UpperCAmelCase = input_ids.ne(SCREAMING_SNAKE_CASE_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
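# What trim_batch above does: drop any padding column that is pad in every row of the batch.
# For pad_token_id=0 and input_ids = [[5, 6, 0, 0], [7, 0, 0, 0]], the keep mask is
# input_ids.ne(0).any(dim=0) == [True, True, False, False], so the trimmed batch is
# [[5, 6], [7, 0]] — the second row keeps one pad because the first row still needs column 1.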
class a ( lowerCAmelCase_ ):
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int]="train" , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Any="" , ):
super().__init__()
_UpperCAmelCase = Path(_lowercase ).joinpath(type_path + """.source""" )
_UpperCAmelCase = Path(_lowercase ).joinpath(type_path + """.target""" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Dict ):
return len(self.src_lens )
def __getitem__( self : List[Any] , __lowerCAmelCase : Dict ):
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , _lowercase ).rstrip("""\n""" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , _lowercase ).rstrip("""\n""" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
_UpperCAmelCase = encode_line(_lowercase , _lowercase , self.max_source_length , """right""" )
_UpperCAmelCase = encode_line(_lowercase , _lowercase , self.max_target_length , """right""" )
_UpperCAmelCase = source_inputs["""input_ids"""].squeeze()
_UpperCAmelCase = target_inputs["""input_ids"""].squeeze()
_UpperCAmelCase = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Union[str, Any] ):
return [len(_lowercase ) for x in Path(_lowercase ).open().readlines()]
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = torch.stack([x["""input_ids"""] for x in batch] )
_UpperCAmelCase = torch.stack([x["""attention_mask"""] for x in batch] )
_UpperCAmelCase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(_lowercase , _lowercase )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(_lowercase , _lowercase , attention_mask=_lowercase )
_UpperCAmelCase = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = get_git_info()
save_json(SCREAMING_SNAKE_CASE_ ,os.path.join(SCREAMING_SNAKE_CASE_ ,"""git_log.json""" ) )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=4 ,**lowercase ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ ,"""w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,indent=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = {
"""repo_id""": str(SCREAMING_SNAKE_CASE_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return list(map(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
def remove_articles(lowercase ):
return re.sub(R"""\b(a|an|the)\b""" ,""" """ ,SCREAMING_SNAKE_CASE_ )
def white_space_fix(lowercase ):
return " ".join(text.split() )
def remove_punc(lowercase ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = normalize_answer(SCREAMING_SNAKE_CASE_ ).split()
_UpperCAmelCase = normalize_answer(SCREAMING_SNAKE_CASE_ ).split()
_UpperCAmelCase = Counter(SCREAMING_SNAKE_CASE_ ) & Counter(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
em += exact_match_score(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
em /= len(SCREAMING_SNAKE_CASE_ )
return {"em": em}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return model_prefix.startswith("""rag""" )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = """dropout_rate"""
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
if not hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and not hasattr(SCREAMING_SNAKE_CASE_ ,equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(SCREAMING_SNAKE_CASE_ ) )
delattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
continue
_UpperCAmelCase = p if hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
delattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return hparams, config
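# Worked example of the answer-normalisation + token-F1 scoring implemented above, restated
# standalone because the in-file defs are obfuscated. Pipeline: lowercase -> strip
# punctuation -> drop articles -> collapse whitespace; F1 is then computed over the shared
# token multiset. "The Cat sat." and "cat sat" normalise identically, so F1 = 1.0.
def _token_f1_sketch(gold: str = "The Cat sat.", pred: str = "cat sat") -> float:
    def normalize(text: str) -> str:
        text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
        text = re.sub(r"\b(a|an|the)\b", " ", text)
        return " ".join(text.split())

    gold_tokens, pred_tokens = normalize(gold).split(), normalize(pred).split()
    common = Counter(gold_tokens) & Counter(pred_tokens)
    num_same = sum(common.values())
    precision, recall = num_same / len(pred_tokens), num_same / len(gold_tokens)
    return (2 * precision * recall) / (precision + recall)  # 1.0 for the defaults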
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCAmelCase__ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( lowerCamelCase_ ):
_snake_case : Any = """albert"""
def __init__( self : List[str] , __lowerCAmelCase : Tuple=3_0000 , __lowerCAmelCase : Optional[int]=128 , __lowerCAmelCase : Dict=4096 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Optional[Any]=64 , __lowerCAmelCase : List[Any]=1_6384 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : Union[str, Any]="gelu_new" , __lowerCAmelCase : str=0 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[Any]=1e-1_2 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Dict="absolute" , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[Any]=3 , **__lowerCAmelCase : Any , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_hidden_groups
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = inner_group_num
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = classifier_dropout_prob
_UpperCAmelCase = position_embedding_type
class a ( lowerCamelCase_ ):
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
    UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if not len(lowercase ) == len(lowercase ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
_UpperCAmelCase = equationa
_UpperCAmelCase = equationa
# Calculate the determinants of the matrices
_UpperCAmelCase = aa * ba - aa * ba
_UpperCAmelCase = ca * ba - ca * ba
_UpperCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution (consistent system with x = y = 0)
return (0.0, 0.0)
else:
_UpperCAmelCase = determinant_x / determinant
_UpperCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
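# Worked check of the Cramer's-rule solver above: for the system
#     4x + 2y = 2
#     1x + 1y = 1
# determinant = 4*1 - 1*2 = 2, determinant_x = 2*1 - 1*2 = 0, determinant_y = 4*1 - 1*2 = 2,
# so (x, y) = (0/2, 2/2) = (0.0, 1.0). Readable sketch of the same arithmetic:
def _cramers_rule_sketch() -> tuple:
    a1, b1, c1 = 4, 2, 2  # 4x + 2y = 2
    a2, b2, c2 = 1, 1, 1  # 1x + 1y = 1
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    return (determinant_x / determinant, determinant_y / determinant)  # (0.0, 1.0)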
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called. '''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
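# Intended usage of the batch-size-halving decorator above (upstream this is accelerate's
# `find_executable_batch_size`; the alias and function names below are assumptions). The
# wrapped function must take the batch size as its FIRST parameter and must not be passed
# one explicitly — the decorator injects it and halves it on every OOM-style failure matched
# by the patterns above.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, dataloader):
#         ...  # raise a CUDA OOM here and the call is retried at 64, 32, ...
#
#     train(model, dataloader)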
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'megatron-bert'
def __init__( self : Optional[Any] , __lowerCAmelCase : int=2_9056 , __lowerCAmelCase : Dict=1024 , __lowerCAmelCase : Union[str, Any]=24 , __lowerCAmelCase : List[str]=16 , __lowerCAmelCase : List[str]=4096 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : List[Any]=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Dict=1e-1_2 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : Dict="absolute" , __lowerCAmelCase : Union[str, Any]=True , **__lowerCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = len(_lowerCamelCase )
_UpperCAmelCase = len(_lowerCamelCase )
_UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_UpperCAmelCase = True
for i in range(_lowerCamelCase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_UpperCAmelCase = True
if a[i].islower():
_UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
| 0
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class a ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Dict ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.values[key]
def lowerCAmelCase_ ( self : List[Any] ):
return (
sum(self.charge_factor - len(_SCREAMING_SNAKE_CASE ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_SCREAMING_SNAKE_CASE ) == 0
):
return key
return super()._collision_resolution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
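

# Illustrative usage sketch (not part of the original file). It assumes the sibling
# `hash_table.HashTable` exposes `size_table`/`charge_factor` constructor arguments
# and an `insert_data` method; treat those names as hypothetical here.
#
# table = HashTableWithLinkedList(3, charge_factor=2)
# for value in (10, 20, 30):
#     table.insert_data(value)
# print(table.values)  # each occupied slot holds a deque of inserted values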
| 370
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=18 , __lowerCAmelCase : Dict=30 , __lowerCAmelCase : Any=400 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=None , ):
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
def lowerCAmelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 371
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
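

# Illustrative behavior sketch (not part of the original file): `_pad_tensors_to_max_len`
# right-pads generated ids or labels with the pad (or eos) token id so shapes match
# before metrics are computed, e.g. a (2, 3) tensor padded to max_length=5 keeps its
# first 3 columns and fills columns 3-4 with `pad_token_id`.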
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
return number - int(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase__ = False
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = 'ybelkada/fonts'
def __UpperCAmelCase ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract non-overlapping patches from a (channels, height, width) image tensor.
    Returns a tensor of shape (1, rows, columns, channels * patch_height * patch_width).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
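

# Illustrative shape walkthrough (not part of the original file): a 3x32x64 image
# split into 16x16 patches yields 2 rows x 4 columns, each patch flattened to
# 3 * 16 * 16 = 768 values.
#
# example = torch.randn(3, 32, 64)
# patches = torch_extract_patches(example, 16, 16)
# assert patches.shape == (1, 2, 4, 768)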
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` onto a PIL image using the given font, colors and padding."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header, **kwargs):
    """Render a text `header` above `image` and return the combined image as a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize the image, extract flattened patches prefixed with row/column ids, and pad to `max_patches`."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits within `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with its own mean and standard deviation (per-image standardization)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
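

# Illustrative usage sketch (not part of the original file); `example_image` is a
# placeholder for any PIL image. With the default 16x16 patches on an RGB image,
# each flattened patch row is [row_id, col_id, 768 pixel values].
#
# processor = Pix2StructImageProcessor(max_patches=1024)
# batch = processor(images=example_image, return_tensors="pt")
# batch["flattened_patches"].shape  # torch.Size([1, 1024, 770])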
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Create the list of (old, new) key names for converting a timm state dict."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm qkv projection into separate query, key and value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify the conversion on this COCO validation image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm model's weights into our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model
_UpperCAmelCase = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
_UpperCAmelCase = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,UpperCAmelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    Introsort: quicksort with a heapsort fallback at depth 0 and
    insertion sort for small slices.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
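    # Optional self-check (editor's illustrative addition, not in the original script):
    # the hybrid introsort must agree with Python's built-in sorted() on any input.
    assert sort(unsorted.copy()) == sorted(unsorted)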
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCAmelCase__ = False
class a ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 354
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverse the word order of a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
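
# Standalone usage sketch (editor's addition; assumes the tiny test checkpoint can be downloaded):
#   vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)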
| 355
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations

from random import random


class Node:
    """Treap's node: a treap is a binary tree by value and a max-heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap: values <= `value` go left, the rest go right."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; all values in `left` must be <= all values in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
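    # Non-interactive usage sketch (editor's addition, not in the original):
    #   root = None
    #   for value in (5, 3, 8):
    #       root = insert(root, value)
    #   inorder(root)  # prints: 3,5,8,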
| 356
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 30
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
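
# Illustration (editor's addition; the TF key below is a hypothetical example):
# applying DECODER_PATTERNS in order maps a TF variable name onto an HF state-dict key.
assert (
    rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
    == "model.decoder.layers.0.self_attn.q_proj.weight"
)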
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 357
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
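
# Usage sketch (editor's addition; needs a downloaded processor, so shown as comments):
#   from transformers import AutoProcessor, TensorType
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="sequence-classification")
#   dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#   print(dummy.keys())  # input_ids, attention_mask, bbox, pixel_values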
| 30
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
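
# Usage sketch (editor's addition; the data path and tokenizer name are placeholders):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   dataset = HansDataset("/path/to/hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
#   print(len(dataset), dataset.get_labels())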
| 358
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
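# Example invocation (editor's illustration; the script and csv file names are hypothetical):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file plot.png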
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 30
| 0
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor) for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
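
# Quick shape check (editor's addition, not in the original file):
# four timesteps embedded into an 8-dim sinusoidal signal -> shape (4, 8).
_emb = get_sinusoidal_embeddings(jnp.array([0.0, 1.0, 2.0, 3.0]), embedding_dim=8)
assert _emb.shape == (4, 8)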
| 360
|
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
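
# Worked example (editor's addition; the corpus and query term are made up):
_corpus = "the cat sat\nthe dog sat\nthe cat ran"
_tf = term_frequency("cat", "the cat sat with the cat")  # 2 occurrences
_df, _n = document_frequency("cat", _corpus)  # (2, 3): "cat" appears in 2 of 3 docs
_idf = inverse_document_frequency(_df, _n)  # round(log10(3 / 2), 3) == 0.176
assert tf_idf(_tf, _idf) == 0.352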
| 30
| 0
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any]=13 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=99 , __lowerCAmelCase : int=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Union[str, Any]=37 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : int=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
_UpperCAmelCase = BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ )
_UpperCAmelCase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , ):
_UpperCAmelCase = BioGptForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , *__lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# create attention mask
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
_UpperCAmelCase = self.seq_length // 2
_UpperCAmelCase = 0
# first forward pass
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCAmelCase = ids_tensor((1,) , snake_case__ ).item() + 1
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case__ )] , dim=1 , )
# get two different outputs
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
_UpperCAmelCase = model(snake_case__ , past_key_values=snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , *__lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = BioGptModel(config=snake_case__ ).to(snake_case__ ).eval()
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
# first forward pass
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
_UpperCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[
'''last_hidden_state'''
]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , *__lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=False ):
_UpperCAmelCase = BioGptForCausalLM(snake_case__ )
model.to(snake_case__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , *__lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = BioGptModel(snake_case__ )
_UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , *__lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = BioGptForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*snake_case__ )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case__ )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case__ , gradient_checkpointing=snake_case__ )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case__ )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case__ )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case__ )
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        # number of pad tokens that batching added to the shorter (second) sentence
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the weights of an old-structure checkpoint into the new model and save it."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
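
    # Every weight the new model reports as missing must be copied over from the old checkpoint.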
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old model fuses query/key/value into one in_proj tensor; split it into thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
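# Example invocation (script name and paths are illustrative):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted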
| 30
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
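
# Instantiating the deprecated class behaves exactly like DeformableDetrImageProcessor,
# except that the FutureWarning above is emitted once at construction time.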
| 362
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists (index i holds the coefficient of x^i)
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0s so the padded length is a power of 2, at least len_A + len_B - 1
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Cooley-Tukey butterflies: halve the number of columns at every step
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B, then apply the inverse transform to get A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove trailing zero coefficients (highest powers)
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c
    # Printable representation of A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
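
# Illustrative example (values worked out by hand for the class above):
#   (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
#   print(FFT([1, 2], [3, 4]).product)  # -> [(3+0j), (10+0j), (8+0j)]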
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
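# Full determinism makes the exact pixel-value assertions below reproducible across runs.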
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 363
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
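
# Illustrative usage (assuming the class is exported as `UperNetConfig`):
#   config = UperNetConfig()        # falls back to the default ResNet backbone
#   config_dict = config.to_dict()  # the nested backbone config is serialized too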
| 30
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 364
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    """
    Probability that Peter (nine 4-sided dice) rolls a strictly higher total
    than Colin (six 6-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
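
# For reference, solution() evaluates to 0.5731441, the accepted answer to Project Euler 205.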
if __name__ == "__main__":
print(F'''{solution() = }''')
| 30
| 0