| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = 1.5
_UpperCAmelCase = int(factor * num_class_images )
_UpperCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=lowercase ,aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' ,exist_ok=lowercase )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCAmelCase = client.query(text=lowercase )
if len(lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
_UpperCAmelCase = int(factor * num_images )
_UpperCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=lowercase ,aesthetic_weight=0.1 ,)
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = tqdm(desc="""downloading real regularization images""" ,total=lowercase )
with open(f'''{class_data_dir}/caption.txt''' ,"""w""" ) as fa, open(f'''{class_data_dir}/urls.txt''' ,"""w""" ) as fa, open(
f'''{class_data_dir}/images.txt''' ,"""w""" ) as fa:
while total < num_class_images:
_UpperCAmelCase = class_images[count]
count += 1
try:
_UpperCAmelCase = requests.get(images["""url"""] )
if img.status_code == 2_00:
_UpperCAmelCase = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' ,"""wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser("""""" ,add_help=lowercase )
parser.add_argument("""--class_prompt""" ,help="""text prompt to retrieve images""" ,required=lowercase ,type=lowercase )
parser.add_argument("""--class_data_dir""" ,help="""path to save images""" ,required=lowercase ,type=lowercase )
parser.add_argument("""--num_class_images""" ,help="""number of images to download""" ,default=2_00 ,type=lowercase )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
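A minimal sketch of calling `retrieve` directly, assuming `clip-retrieval` is installed and the LAION knn service is reachable (the prompt and directory below are illustrative, not from the original):

retrieve(class_prompt="photo of a cat", class_data_dir="./real_reg/cat", num_class_images=200)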
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
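As a quick check, the config above can be instantiated with its defaults and round-tripped through `to_dict()`; a minimal sketch, assuming the class is exposed as `UperNetConfig` in `transformers`:

from transformers import UperNetConfig

config = UperNetConfig()  # logs that the default ResNet backbone is used
assert config.to_dict()["model_type"] == "upernet"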
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Dict ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(__lowerCAmelCase ) # fails here
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(3 )
_UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase = DisjunctiveConstraint(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
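Outside the tests, a `DisjunctiveConstraint` is normally handed to constrained beam search; a minimal sketch, assuming a `model` and `input_ids` already exist:

constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)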
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
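A quick sanity check of `total_frequency_distribution`: the frequencies must sum to the number of equally likely throws.

freqs = total_frequency_distribution(sides_number=4, dice_number=2)
assert sum(freqs) == 4**2  # 16 outcomes for two 4-sided dice
assert freqs[2] == 1  # only (1, 1) totals 2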
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
_UpperCAmelCase = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_UpperCAmelCase = 1
if upper_limit > 0:
_UpperCAmelCase = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 ,upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
UpperCAmelCase__ = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
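The first six Catalan numbers are 1, 1, 2, 5, 14, 42, which gives a quick check of the function:

assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]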
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
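A minimal sketch of composing a config with the `from_encoder_decoder_configs` classmethod above (the ViT/BERT pairing is illustrative, not from the original):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention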
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class a ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : int ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Tuple=None ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Optional[int] ):
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=None ):
_UpperCAmelCase = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
_UpperCAmelCase = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
return model_outputs
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int ):
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
"""generated_text""": self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
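In practice this pipeline is reached through the `pipeline` factory; a minimal usage sketch (the checkpoint and image URL are illustrative):

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))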
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
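A hypothetical invocation of the conversion script above (the script name and dump path are illustrative, not from the original):

# python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation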
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
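The decorator above wraps a training entry point whose first parameter is the batch size; a minimal sketch of the intended usage:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # build dataloaders with `batch_size` and run the loop;
    # on an OOM error the decorator retries with batch_size // 2
    ...

train()  # note: called without the batch size argument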
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = 3
_UpperCAmelCase = 250
_UpperCAmelCase = ids_tensor((batch_size, length) , __lowerCAmelCase )
_UpperCAmelCase = torch.ones((batch_size, length) , device=__lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
_UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MaxLengthCriteria(max_length=10 )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self._get_tensors(5 )
_UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[str] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
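Outside the tests, the same criteria plug directly into `generate`; a minimal sketch, assuming a `model` and `input_ids` already exist:

stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
outputs = model.generate(input_ids, stopping_criteria=stopping_criteria)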
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring"""
import argparse
import datetime
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
_UpperCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(lowercase ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
_UpperCAmelCase = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
_UpperCAmelCase = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
_UpperCAmelCase = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
_UpperCAmelCase = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
_UpperCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
_UpperCAmelCase = datetime.date(int(lowercase ) ,int(lowercase ) ,int(lowercase ) )
# Start math
if m <= 2:
_UpperCAmelCase = y - 1
_UpperCAmelCase = m + 12
# maths var
_UpperCAmelCase = int(str(lowercase )[:2] )
_UpperCAmelCase = int(str(lowercase )[2:] )
_UpperCAmelCase = int(2.6 * m - 5.39 )
_UpperCAmelCase = int(c / 4 )
_UpperCAmelCase = int(k / 4 )
_UpperCAmelCase = int(d + k )
_UpperCAmelCase = int(t + u + v + x )
_UpperCAmelCase = int(z - (2 * c) )
_UpperCAmelCase = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
_UpperCAmelCase = f'''Your date {date_input}, is a {days[str(lowercase )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
UpperCAmelCase__ = parser.parse_args()
zeller(args.date_input)
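For example, January 31st 2010 fell on a Sunday, so the function behaves like this:

>>> zeller("01-31-2010")
'Your date 01-31-2010, is a Sunday!'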
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
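A minimal instantiation sketch with the defaults above (assuming the class is exposed as `VanConfig`):

config = VanConfig()
assert config.hidden_sizes == [64, 128, 320, 512]
assert len(config.depths) == len(config.strides) == 4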
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : List[str]=10 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : str=10 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Dict=0.9 , __lowerCAmelCase : int=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = mask_ratio
_UpperCAmelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_UpperCAmelCase = int(mask_ratio * self.seq_length )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : str ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = VideoMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = VideoMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.batch_size , -1 ).bool()
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
# model only returns predictions for masked patches
_UpperCAmelCase = mask.sum().item()
_UpperCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[str] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_snake_case : Any = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_snake_case : List[str] = False
_snake_case : Optional[Any] = False
_snake_case : List[Any] = False
_snake_case : Dict = False
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = VideoMAEModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=False ):
_UpperCAmelCase = copy.deepcopy(__lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.model_tester.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_UpperCAmelCase = bool_masked_pos.to(__lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(__lowerCAmelCase ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def lowerCAmelCase_ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : List[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = VideoMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
if not self.has_attentions:
pass
else:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_UpperCAmelCase = len(__lowerCAmelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCAmelCase_ ( self : str ):
def check_hidden_states_output(__lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" ,filename="""eating_spaghetti.npy""" ,repo_type="""dataset""" )
_UpperCAmelCase = np.load(lowercase )
return list(lowercase )
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : List[Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
__lowerCAmelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__lowerCAmelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# add boolean mask, indicating which patches to mask
_UpperCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = torch.Size([1, 1408, 1536] )
_UpperCAmelCase = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=__lowerCAmelCase )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_UpperCAmelCase = torch.tensor([0.5_142] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1e-4 ) )
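        # Note: `norm_pix_loss=True` normalizes each target patch by its own mean and
        # std before computing the MSE (as in the original MAE recipe), so the expected
        # loss differs from the un-normalized variant checked next.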
# verify the loss (`config.norm_pix_loss` = `False`)
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__lowerCAmelCase ).to(
__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([0.6_469] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1e-4 ) )
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
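# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so the function above
# returns 26 for power=15; the default power=1000 reproduces Project Euler problem 16.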
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 1
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of how to use Accelerate,
# specifically showcasing how to perform cross-validation,
# and it builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
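# A minimal sketch (illustrative, not part of the training script below) of the
# split machinery: StratifiedKFold yields train/validation index arrays whose
# label distribution mirrors the full dataset.
#
#   from sklearn.model_selection import StratifiedKFold
#   import numpy as np
#
#   labels = np.array([0, 0, 0, 1, 1, 1])
#   kfold = StratifiedKFold(n_splits=3)
#   for train_idx, valid_idx in kfold.split(np.zeros(len(labels)), labels):
#       print(train_idx, valid_idx)  # every fold keeps the 50/50 class balance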
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = DatasetDict(
{
"""train""": dataset["""train"""].select(lowercase ),
"""validation""": dataset["""train"""].select(lowercase ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
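        # Illustrative: with pad_to_multiple_of=8 and padding="longest", a batch whose
        # longest sequence has 61 tokens is padded to 64, keeping shapes friendly to
        # fp16/bf16 tensor cores; on TPU a fixed max_length of 128 avoids recompilation.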
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""test"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# New Code #
_UpperCAmelCase = []
# Download the dataset
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
# Create our splits
_UpperCAmelCase = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowercase )
# New Code #
# Create our folds:
_UpperCAmelCase = kfold.split(np.zeros(datasets["""train"""].num_rows ) ,datasets["""train"""]["""label"""] )
_UpperCAmelCase = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowercase ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_fold_dataloaders(
lowercase ,lowercase ,lowercase ,lowercase ,)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase = []
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowercase ,dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase = torch.cat(lowercase ,dim=0 )
_UpperCAmelCase = torch.stack(lowercase ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase = metric.compute(predictions=lowercase ,references=lowercase )
accelerator.print("""Average test metrics from all folds:""" ,lowercase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" ,type=lowercase ,default=3 ,help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
| 30
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via the model
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
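    # Label-smoothing background (paraphrasing the usual formulation, not the exact
    # `utils.label_smoothed_nll_loss` signature): with smoothing factor eps,
    #   loss = (1 - eps) * nll_loss + eps * smooth_loss
    # where smooth_loss is the mean negative log-probability over all classes, which
    # nudges the model away from fully confident one-hot targets.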
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If the PAD token is not defined, at least the EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
| 30
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class a ( lowerCAmelCase_ ):
_snake_case : Union[List[PIL.Image.Image], np.ndarray]
_snake_case : Optional[List[bool]]
_snake_case : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
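# Worked example: on a page of width 500 and height 1000 pixels, the box
# (x0=50, y0=100, x1=150, y1=200) becomes [100, 100, 300, 200] on the 0-1000
# normalized scale expected by LayoutLM-style models.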
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = None ):
"""simple docstring"""
_UpperCAmelCase = tesseract_config if tesseract_config is not None else """"""
# apply OCR
_UpperCAmelCase = to_pil_image(lowercase )
_UpperCAmelCase , _UpperCAmelCase = pil_image.size
_UpperCAmelCase = pytesseract.image_to_data(lowercase ,lang=lowercase ,output_type="""dict""" ,config=lowercase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_UpperCAmelCase = [idx for idx, word in enumerate(lowercase ) if not word.strip()]
_UpperCAmelCase = [word for idx, word in enumerate(lowercase ) if idx not in irrelevant_indices]
_UpperCAmelCase = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
_UpperCAmelCase = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
_UpperCAmelCase = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
_UpperCAmelCase = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_UpperCAmelCase = []
for x, y, w, h in zip(lowercase ,lowercase ,lowercase ,lowercase ):
_UpperCAmelCase = [x, y, x + w, y + h]
actual_boxes.append(lowercase )
# finally, normalize the bounding boxes
_UpperCAmelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase ,lowercase ,lowercase ) )
assert len(lowercase ) == len(lowercase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
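# Illustrative: pytesseract reports boxes as (left, top, width, height); a word at
# (x=10, y=20, w=30, h=40) is first converted above to the corner format
# [10, 20, 40, 60] and only then normalized to the 0-1000 scale.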
class a ( lowerCAmelCase_ ):
_snake_case : Any = ['pixel_values']
def __init__( self : List[str] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = "" , **__lowerCAmelCase : Dict , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = size if size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase = get_size_dict(__lowerCAmelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = apply_ocr
_UpperCAmelCase = ocr_lang
_UpperCAmelCase = tesseract_config
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Union[str, Any] , ):
_UpperCAmelCase = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_UpperCAmelCase = (size["""height"""], size["""width"""])
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Tuple , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__lowerCAmelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
_UpperCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
_UpperCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
_UpperCAmelCase = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__lowerCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_UpperCAmelCase = []
_UpperCAmelCase = []
for image in images:
_UpperCAmelCase , _UpperCAmelCase = apply_tesseract(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
words_batch.append(__lowerCAmelCase )
boxes_batch.append(__lowerCAmelCase )
if do_resize:
_UpperCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_UpperCAmelCase = [flip_channel_order(__lowerCAmelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
_UpperCAmelCase = BatchFeature(data={"""pixel_values""": images} , tensor_type=__lowerCAmelCase )
if apply_ocr:
_UpperCAmelCase = words_batch
_UpperCAmelCase = boxes_batch
return data
| 30
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
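# Shape check (illustrative): timm stores attention as one fused projection, so
# `in_proj_weight` has shape (3 * hidden_size, hidden_size) and `in_proj_bias`
# has shape (3 * hidden_size,); the three slices above peel off the query, key
# and value parameters, in that order.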
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
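    # If the timm transform and the HF image processor agree on the raw image
    # (checked just above), any remaining difference in the logits below can only
    # come from the converted weights, which is what this script validates.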
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( lowerCAmelCase_ ):
_snake_case : UNetaDModel
_snake_case : KarrasVeScheduler
def __init__( self : str , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self : Dict , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ):
_UpperCAmelCase = self.unet.config.sample_size
_UpperCAmelCase = (batch_size, 3, img_size, img_size)
_UpperCAmelCase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_UpperCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_UpperCAmelCase = self.scheduler.schedule[t]
_UpperCAmelCase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_UpperCAmelCase , _UpperCAmelCase = self.scheduler.add_noise_to_input(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_UpperCAmelCase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_UpperCAmelCase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_UpperCAmelCase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_UpperCAmelCase = self.scheduler.step_correct(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , step_output.prev_sample , step_output["""derivative"""] , )
_UpperCAmelCase = step_output.prev_sample
_UpperCAmelCase = (sample / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
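# Sampler recap (paraphrasing Algorithm 2 of Karras et al. 2022, not an exact
# transcription of the scheduler internals): given a sample x at noise level sigma,
#   x_hat  = x + sqrt(sigma_hat**2 - sigma**2) * noise          # steps 1-2
#   d      = (x_hat - D(x_hat; sigma_hat)) / sigma_hat          # step 4
#   x_prev = x_hat + (sigma_prev - sigma_hat) * d               # step 5 (Euler)
# and, when sigma_prev != 0, a second derivative estimate at x_prev is averaged
# with d to apply the 2nd order (Heun-style) correction of step 6.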
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of how to use Accelerate
# with LocalSGD, which is a method that synchronizes model
# parameters every K batches. It is different from, but complementary
# to, gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
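# In LocalSGD terms (a sketch of the idea, not the library internals): each worker
# takes `local_sgd_steps` optimizer steps on its own shard, and only then are the
# model parameters averaged across workers, roughly:
#
#   for p in model.parameters():
#       torch.distributed.all_reduce(p.data)  # sum the parameters across workers
#       p.data /= world_size                  # ...then average them
#
# This trades a little staleness for far less communication than synchronizing
# gradients on every batch.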
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1":
_UpperCAmelCase = 2
# New Code #
_UpperCAmelCase = int(args.gradient_accumulation_steps )
_UpperCAmelCase = int(args.local_sgd_steps )
# Initialize accelerator
_UpperCAmelCase = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=lowercase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(lowercase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
with LocalSGD(
accelerator=lowercase ,model=lowercase ,local_sgd_steps=lowercase ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support TPUs, nor do we advise using this on TPUs, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=lowercase ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=lowercase ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
| 30
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 30
| 1
|
"""simple docstring"""
class a :
def __init__( self : Tuple , __lowerCAmelCase : int ):
_UpperCAmelCase = size
_UpperCAmelCase = [0] * size
_UpperCAmelCase = [0] * size
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : int ):
return index | (index + 1)
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : int ):
return (index & (index + 1)) - 1
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = value
while index < self.size:
_UpperCAmelCase = self.get_prev(__lowerCAmelCase ) + 1
if current_left_border == index:
_UpperCAmelCase = value
else:
_UpperCAmelCase = max(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = self.get_next(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
right -= 1 # Because `right` is exclusive
_UpperCAmelCase = 0
while left <= right:
_UpperCAmelCase = self.get_prev(__lowerCAmelCase )
if left <= current_left:
_UpperCAmelCase = max(__lowerCAmelCase , self.tree[right] )
_UpperCAmelCase = current_left
else:
_UpperCAmelCase = max(__lowerCAmelCase , self.arr[right] )
right -= 1
return result
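    # Bit-trick examples: get_next(index) = index | (index + 1) jumps to the next
    # node whose range covers `index`, e.g. get_next(5) = 0b101 | 0b110 = 0b111 = 7;
    # get_prev(index) = (index & (index + 1)) - 1 is the last position *before* the
    # range owned by `index`, e.g. get_prev(5) = (0b101 & 0b110) - 1 = 4 - 1 = 3.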
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase__ = get_tests_dir("""fixtures""")
UpperCAmelCase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCAmelCase__ = get_tests_dir("""fixtures/dummy-config.json""")
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = 0
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase ).to_dict()
config_dict.pop("""feature_extractor_type""" )
_UpperCAmelCase = WavaVecaFeatureExtractor(**__lowerCAmelCase )
# save in new folder
model_config.save_pretrained(__lowerCAmelCase )
config.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
with self.assertRaisesRegex(
__lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase_ ( self : str ):
with self.assertRaisesRegex(
__lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase , revision="""aaaaaa""" )
def lowerCAmelCase_ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__lowerCAmelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase_ ( self : str ):
class a ( lowerCAmelCase_ ):
_snake_case : str = True
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(__lowerCAmelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 30
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
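# Hedged, offline sketch of the max_id pagination pattern used above; fake_timeline
# stands in for api.user_timeline and all names here are made up:
def fake_timeline(count, max_id=None):
    ids = [i for i in range(100, 0, -1) if max_id is None or i <= max_id]
    return ids[:count]

all_ids = fake_timeline(20)
while True:
    batch = fake_timeline(20, max_id=all_ids[-1] - 1)
    if not batch:
        break
    all_ids.extend(batch)
assert all_ids == list(range(100, 0, -1))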
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
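# Hedged sketch of the lazy-import pattern above: module attributes are resolved on
# first access instead of at import time. This is a generic stand-in, not the actual
# transformers._LazyModule implementation.
import importlib

class DemoLazyModule:
    def __init__(self, import_structure):
        # map each attribute to the module that defines it
        self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        if attr not in self._map:
            raise AttributeError(attr)
        module = importlib.import_module(self._map[attr])  # imported only now
        return getattr(module, attr)

# e.g. DemoLazyModule({"json": ["dumps"]}).dumps({"a": 1}) -> '{"a": 1}'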
| 30
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
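# Hedged sketch of the chars-per-token ratio computed in the map function above;
# the whitespace "tokenizer" is a stand-in and every name here is made up:
def toy_tokenize(example):
    input_ids = example["content"].split()  # pretend these are token ids
    return {"input_ids": input_ids, "ratio_char_token": len(example["content"]) / len(input_ids)}

demo_text = "def add(a, b): return a + b"
demo_out = toy_tokenize({"content": demo_text})
assert demo_out["ratio_char_token"] == len(demo_text) / len(demo_text.split())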
| 30
| 1
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( lowerCAmelCase_ ):
def __init__( self : List[str] , *__lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=None , **__lowerCAmelCase : Any ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = eval_examples
_UpperCAmelCase = post_process_function
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : str = "eval" ):
_UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
_UpperCAmelCase = self.get_eval_dataloader(__lowerCAmelCase )
_UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase = self.compute_metrics
_UpperCAmelCase = None
_UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_UpperCAmelCase = time.time()
try:
_UpperCAmelCase = eval_loop(
__lowerCAmelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , )
finally:
_UpperCAmelCase = compute_metrics
_UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
_UpperCAmelCase = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
_UpperCAmelCase = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
_UpperCAmelCase = metrics.pop(__lowerCAmelCase )
metrics.update(output.metrics )
else:
_UpperCAmelCase = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(__lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str = "test" ):
_UpperCAmelCase = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase = self.compute_metrics
_UpperCAmelCase = None
_UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_UpperCAmelCase = time.time()
try:
_UpperCAmelCase = eval_loop(
__lowerCAmelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , )
finally:
_UpperCAmelCase = compute_metrics
_UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_UpperCAmelCase = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , """predict""" )
_UpperCAmelCase = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
_UpperCAmelCase = metrics.pop(__lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
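# Hedged sketch of the two hooks this trainer wires together; signatures are
# inferred from the call sites above and are assumptions, not the real QA utilities:
def demo_post_process_function(examples, features, predictions, stage="eval"):
    # would map raw start/end logits back to answer strings per example id
    return {"predictions": predictions, "label_ids": None}

def demo_compute_metrics(eval_pred):
    # receives whatever post_process_function returned
    return {"exact_match": 0.0, "f1": 0.0}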
| 30
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to the computed batch and sequence lengths
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
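# Hedged stand-in for compute_effective_axis_dimension as used above: a dynamic
# axis (-1) is replaced by a small fixed size so ONNX tracing cannot specialize
# on it. This mirrors the observed behaviour, not the actual transformers utility:
def effective_dim(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:
        return fixed_dimension - num_token_to_add
    return dimension

assert effective_dim(-1, fixed_dimension=2) == 2  # dynamic batch -> 2 samples
assert effective_dim(-1, fixed_dimension=8, num_token_to_add=2) == 6  # leave room for special tokens
assert effective_dim(16, fixed_dimension=8) == 16  # static axes pass through unchanged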
| 30
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
UpperCAmelCase__ = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
UpperCAmelCase__ = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(lowercase )
return pairs
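# Hedged standalone demo of the symbol-pair extraction above (re-implemented so it
# runs despite the renamed identifiers in the snippet):
def get_pairs_demo(word):
    pairs, prev_char = set(), word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs_demo(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}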
class a ( lowerCAmelCase_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple="<s>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Union[str, Any]="</s>" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Tuple="<mask>" , **__lowerCAmelCase : Tuple , ):
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = merges_file
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 2
_UpperCAmelCase = 3
self.add_from_file(__lowerCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("""\n""" )[:-1]
_UpperCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
_UpperCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__lowerCAmelCase )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_UpperCAmelCase = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__lowerCAmelCase ):
try:
_UpperCAmelCase = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__lowerCAmelCase )
_UpperCAmelCase = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__lowerCAmelCase )
_UpperCAmelCase = """@@ """.join(__lowerCAmelCase )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
return word
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str ):
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(R"""\S+\n?""" , __lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] ):
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str ):
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = """ """.join(__lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file , __lowerCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.merges_file , __lowerCAmelCase )
return out_vocab_file, out_merge_file
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[str] ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
_UpperCAmelCase = f.readlines()
for lineTmp in lines:
_UpperCAmelCase = lineTmp.strip()
_UpperCAmelCase = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
_UpperCAmelCase = line[:idx]
_UpperCAmelCase = len(self.encoder )
| 30
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
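# Hedged sketch of the CSV layout the plotter above expects; the column names come
# from the DictReader accesses (model, batch_size, sequence_length, result), while
# the rows themselves are made up:
# model,batch_size,sequence_length,result
# bert-base,8,128,0.042
# bert-base,8,512,0.158
# gpt2,8,128,0.061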
| 30
| 1
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
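# Hedged demo of how the metric regexes above pull numbers out of training logs
# (standard re module; the log line is made up):
import re as demo_re

demo_log_line = "eval_accuracy = 0.8423"
demo_match = demo_re.search(r"eval_accuracy.*=\D*(.*?)$", demo_log_line)
assert demo_match is not None and float(demo_match.group(1)) == 0.8423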
| 30
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 1
|
"""simple docstring"""
import itertools
import math
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = 2
while True:
if is_prime(lowercase ):
yield num
num += 1
def __UpperCAmelCase ( lowercase = 1_00_01 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,lowercase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
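# Hedged, self-contained restatement of the 6k +/- 1 trial division above, with
# readable names (is_prime_6k is mine, not from the snippet):
import math as demo_math

def is_prime_6k(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(demo_math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime_6k(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# the generator + islice combination above would analogously give solution(6) == 13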
| 30
|
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
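# Hedged, standalone walk-through of the tf-idf arithmetic above (re-implemented
# because the renamed functions in the snippet shadow one another):
from math import log10 as demo_log10

demo_tf = 2                                   # "to" occurs twice in "To be, or not to be"
demo_n_docs, demo_docs_with_term = 10, 4
demo_idf = round(demo_log10(demo_n_docs / demo_docs_with_term), 3)  # log10(2.5) -> 0.398
assert round(demo_tf * demo_idf, 3) == 0.796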
| 30
| 1
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite the fact that we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite the fact that we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
_UpperCAmelCase = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' ,lowercase ,)
is not None
):
_UpperCAmelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_UpperCAmelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_UpperCAmelCase = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
_UpperCAmelCase = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
_UpperCAmelCase = True
if not attribute_used:
_UpperCAmelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_UpperCAmelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_UpperCAmelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_UpperCAmelCase = True
elif attribute.endswith("""_token_id""" ):
_UpperCAmelCase = True
# configuration class specific cases
if not case_allowed:
_UpperCAmelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
_UpperCAmelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = dict(inspect.signature(config_class.__init__ ).parameters )
_UpperCAmelCase = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
_UpperCAmelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_UpperCAmelCase = {}
if len(config_class.attribute_map ) > 0:
_UpperCAmelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_UpperCAmelCase = inspect.getsourcefile(lowercase )
_UpperCAmelCase = os.path.dirname(lowercase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_UpperCAmelCase = [os.path.join(lowercase ,lowercase ) for fn in os.listdir(lowercase ) if fn.startswith("""modeling_""" )]
# Get the source code strings
_UpperCAmelCase = []
for path in modeling_paths:
if os.path.isfile(lowercase ):
with open(lowercase ) as fp:
modeling_sources.append(fp.read() )
_UpperCAmelCase = []
for config_param, default_value in zip(lowercase ,lowercase ):
# `attributes` here is all the variant names for `config_param`
_UpperCAmelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowercase ,lowercase ,lowercase ,lowercase ):
unused_attributes.append(attributes[0] )
return sorted(lowercase )
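# Hedged mini-demo of the signature introspection used above (DemoConfig is made up):
import inspect as demo_inspect

class DemoConfig:
    def __init__(self, hidden_size=768, num_layers=12, **kwargs):
        pass

demo_params = demo_inspect.signature(DemoConfig.__init__).parameters
demo_names = [p for p in demo_params if p not in ("self", "kwargs")]
assert demo_names == ["hidden_size", "num_layers"]
assert [demo_params[p].default for p in demo_names] == [768, 12]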
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_UpperCAmelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda lowercase : inspect.isclass(lowercase )
and issubclass(lowercase ,lowercase )
and inspect.getmodule(lowercase ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
_UpperCAmelCase = check_config_attributes_being_used(lowercase )
if len(lowercase ) > 0:
_UpperCAmelCase = unused_attributes
if len(lowercase ) > 0:
_UpperCAmelCase = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(lowercase )
if __name__ == "__main__":
check_config_attributes()
| 30
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
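# Hedged demo of the fused-QKV split performed above: an in_proj weight of shape
# (3 * embed_dim, embed_dim) is sliced into separate query/key/value matrices
# (torch assumed available; sizes are illustrative):
import torch as demo_torch

demo_embed_dim = 4
demo_in_proj_weight = demo_torch.randn(3 * demo_embed_dim, demo_embed_dim)
demo_q = demo_in_proj_weight[:demo_embed_dim, :]
demo_k = demo_in_proj_weight[demo_embed_dim : 2 * demo_embed_dim, :]
demo_v = demo_in_proj_weight[2 * demo_embed_dim :, :]
assert demo_q.shape == demo_k.shape == demo_v.shape == (demo_embed_dim, demo_embed_dim)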
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# round the floating-point cube root; n ** (1 / 3) is inexact (e.g. 1000 ** (1 / 3) == 9.999999999999998)
_UpperCAmelCase = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
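# Hedged demo of why the cube root is rounded above: in IEEE-754 doubles
# 1000 ** (1 / 3) evaluates to 9.999999999999998, so an unrounded equality
# check would miss a true cube.
assert round(1000 ** (1 / 3)) ** 3 == 1000
assert round(29 ** (1 / 3)) ** 3 != 29  # non-cubes still fail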
| 30
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
# Butterfly passes: combine row i with row i + next_ncol, halving the row count each pass
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
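    # Usage sketch (illustrative only): multiplying (1 + 2x) by (3 + 4x) with
    # the class above should yield 3 + 10x + 8x^2, i.e.
    #     print(a(poly_a=[1, 2], poly_b=[3, 4]))
    # is expected to report A*B with coefficients 3, 10, 8 (up to complex rounding).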
| 30
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = 'megatron-bert'
def __init__( self : Union[str, Any] , __lowerCAmelCase : List[str]=2_9056 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : List[Any]=24 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1e-1_2 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Optional[int]="absolute" , __lowerCAmelCase : int=True , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
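# Usage sketch (hypothetical values; `a` is this file's name for the upstream
# MegatronBertConfig):
#     config = a(vocab_size=29056, num_hidden_layers=24, num_attention_heads=16)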
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
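# Usage sketch (hypothetical values; `a` is this file's name for the upstream
# UperNetConfig). With no backbone_config, a default ResNet backbone config is
# built from CONFIG_MAPPING["resnet"], and the to_dict method above serializes
# it back under the "backbone_config" key:
#     config = a(pool_scales=[1, 2, 3, 6], auxiliary_channels=256)
#     serialized = config.to_dict()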
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 4_00_00_00 ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowercase )
_UpperCAmelCase , _UpperCAmelCase = b, a + b
return sum(lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
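    # Known result: with the default limit of 4_000_000, the even-valued
    # Fibonacci terms (2, 8, 34, ...) sum to 4613732.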
| 30
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
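    # Known result: the probability that Peter's nine 4-sided dice beat
    # Colin's six 6-sided dice is 0.5731441 (rounded to 7 digits).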
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
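# Sketch of the lazy-import pattern above (standard transformers behaviour):
# importing the package only registers `_import_structure`; a heavy submodule
# such as modeling_rembert is loaded on first attribute access, e.g.
#     from transformers import RemBertModel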
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
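# Usage sketch (hypothetical sub-configs; the classmethod on the first class
# above corresponds to the upstream `from_encoder_decoder_configs`):
#     enc_cfg = AutoConfig.for_model("vit")
#     dec_cfg = AutoConfig.for_model("gpt2")
#     cfg = a.from_encoder_decoder_configs(enc_cfg, dec_cfg)  # marks the decoder with is_decoder=True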
| 30
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
    UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
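    # Example invocation (script and output names are placeholders):
    #     python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation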
| 30
| 1
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = OmegaConf.load(lowercase )
_UpperCAmelCase = torch.load(lowercase ,map_location="""cpu""" )["""model"""]
_UpperCAmelCase = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase = {}
_UpperCAmelCase = """first_stage_model."""
for key in keys:
if key.startswith(lowercase ):
_UpperCAmelCase = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase = {}
_UpperCAmelCase = """model.diffusion_model."""
for key in keys:
if key.startswith(lowercase ):
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase = config.model.params.first_stage_config.params
_UpperCAmelCase = config.model.params.unet_config.params
_UpperCAmelCase = VQModel(**lowercase ).eval()
vqvae.load_state_dict(lowercase )
_UpperCAmelCase = UNetLDMModel(**lowercase ).eval()
unet.load_state_dict(lowercase )
_UpperCAmelCase = DDIMScheduler(
timesteps=config.model.params.timesteps ,beta_schedule="""scaled_linear""" ,beta_start=config.model.params.linear_start ,beta_end=config.model.params.linear_end ,clip_sample=lowercase ,)
_UpperCAmelCase = LDMPipeline(lowercase ,lowercase ,lowercase )
pipeline.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
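    # Example invocation (script name and paths are placeholders):
    #     python convert_ldm_original.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline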
| 30
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
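# Usage sketch (hypothetical training function; upstream this decorator is
# accelerate's find_executable_batch_size). The decorator injects the first
# argument, halving it after every out-of-memory failure:
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size, model, optimizer):
#         ...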
| 30
| 1
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
# Initialise PyTorch model
_UpperCAmelCase = AlbertConfig.from_json_file(lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_UpperCAmelCase = AlbertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
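    # Example invocation (paths are placeholders):
    #     python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #         --tf_checkpoint_path ./albert/model.ckpt-best \
    #         --albert_config_file ./albert/albert_config.json \
    #         --pytorch_dump_path ./albert/pytorch_model.bin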
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
                _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
        ) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 1
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
UpperCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase__ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase__ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase__ = re.compile(r"""^\s*else:""")
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if _re_test_backend.search(lowercase ) is None:
return None
_UpperCAmelCase = [b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = 0
while line_index < len(lowercase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCAmelCase = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_UpperCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
_UpperCAmelCase = _re_one_line_import_struct.search(lowercase ).groups()[0]
_UpperCAmelCase = re.findall(R"""\[([^\]]+)\]""" ,lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_UpperCAmelCase = _re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
_UpperCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_UpperCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_UpperCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
_UpperCAmelCase = _re_import_struct_add_many.search(lowercase ).groups()[0].split(""", """ )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
_UpperCAmelCase = _re_between_brackets.search(lowercase ).groups()[0].split(""", """ )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCAmelCase = []
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
def find_duplicates(lowercase ):
return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCAmelCase = []
for key in import_dict_objects.keys():
_UpperCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_UpperCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCAmelCase = """base imports""" if key == """none""" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
_UpperCAmelCase = os.path.join(lowercase ,"""__init__.py""" )
_UpperCAmelCase = parse_init(lowercase )
if objects is not None:
_UpperCAmelCase = analyze_results(*lowercase )
if len(lowercase ) > 0:
_UpperCAmelCase = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError("""\n\n""".join(lowercase ) )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
_UpperCAmelCase = str((Path(lowercase ) / folder).relative_to(lowercase ) )
_UpperCAmelCase = short_path.replace(os.path.sep ,""".""" )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
_UpperCAmelCase = str((Path(lowercase ) / fname).relative_to(lowercase ) )
_UpperCAmelCase = short_path.replace(""".py""" ,"""""" ).replace(os.path.sep ,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowercase )
return submodules
UpperCAmelCase__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def __UpperCAmelCase ( ):
"""simple docstring"""
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_UpperCAmelCase = direct_transformers_import(lowercase )
_UpperCAmelCase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to pick up all additions
    # and (potentially re-)add them.
with open(os.path.join(lowercase ,"""__init__.py""" ) ,"""r""" ) as f:
_UpperCAmelCase = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" ,lowercase ) ) )
_UpperCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowercase ) > 0:
_UpperCAmelCase = """\n""".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
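    # Typical invocation, assuming this file sits at its usual location
    # (utils/check_inits.py) in the transformers repository root:
    #     python utils/check_inits.py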
| 30
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 60_08_51_47_51_43 ):
"""simple docstring"""
try:
_UpperCAmelCase = int(lowercase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_UpperCAmelCase = 2
_UpperCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_UpperCAmelCase = i
while n % i == 0:
_UpperCAmelCase = n // i
i += 1
return int(lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
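    # Known result: the largest prime factor of the default
    # n = 600_851_475_143 is 6857.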
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
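    # Known result: for the function default power = 1000, the digits of
    # 2**1000 sum to 1366.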
| 30
| 1
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
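# Example invocation (repository and dataset names are placeholders):
#     python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo tokenized-codeparrot-train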
| 30
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
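# Note: the learning-rate schedule is selected by name through the mapping at
# the top of this file (upstream `arg_to_scheduler`), e.g. --lr_scheduler
# cosine resolves to get_cosine_schedule_with_warmup.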
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""ConvNextFeatureExtractor"""]
UpperCAmelCase__ = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
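# --- usage note (illustrative, not part of the original file) ---
# With the _LazyModule wrapper above, `from transformers import ConvNextModel`
# resolves lazily: the torch-dependent submodule is only imported when the
# attribute is first accessed.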
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
import math
def solution( n = 1_00 ):
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1 ,n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 ,n + 1 ) ) ,2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
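# --- illustrative cross-check (not part of the original solution) ---
# Closed forms: sum(i) = n(n+1)/2 and sum(i**2) = n(n+1)(2n+1)/6, so for
# n = 10 the difference is 55**2 - 385 = 2640.
assert solution(10 ) == 2640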
| 30
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
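# --- illustrative invocation (the script file name is an assumption; the
# flags match the argparse definitions above) ---
# python convert_vit_hybrid_timm_to_pytorch.py \
#     --vit_name vit_base_r50_s16_384 \
#     --pytorch_dump_folder_path ./vit-hybrid-base-384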
| 30
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model's `labels` argument
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
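# --- minimal sketch of the padding helper above (illustrative only) ---
import torch

def pad_tensors_to_max_len_sketch(tensor, max_length, pad_token_id):
    # Pre-fill a (batch, max_length) tensor with the pad id, then copy the
    # original values into the leading positions.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

assert pad_tensors_to_max_len_sketch(torch.tensor([[1, 2, 3]]), 5, 0).tolist() == [[1, 2, 3, 0, 0]]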
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap( array ,index1 ,index2 ,direction ):
    """Swap array[index1] and array[index2] if they are out of order for the
    given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge( array ,low ,length ,direction ):
    """Merge a bitonic run of ``length`` elements starting at ``low``."""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low ,low + middle ):
            comp_and_swap(array ,i ,i + middle ,direction )
        bitonic_merge(array ,low ,middle ,direction )
        bitonic_merge(array ,low + middle ,middle ,direction )
def bitonic_sort( array ,low ,length ,direction ):
    """Recursively sort ``length`` elements starting at ``low``; ``length``
    must be a power of two."""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array ,low ,middle ,1 )
        bitonic_sort(array ,low + middle ,middle ,0 )
        bitonic_merge(array ,low ,length ,direction )
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase__ = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
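# --- illustrative self-test (appended, not from the original file; the
# input length must be a power of two and the values are assumed) ---
sample = [12, 42, -21, 17, 23, 18, 9, -5]
bitonic_sort(sample, 0, len(sample), 1)
assert sample == sorted(sample)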
| 30
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the `accelerate` command line interface."""
    parser = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args ,"""func""" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
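# --- illustrative invocations (subcommands as registered above; the script
# name passed to `launch` is an assumed example) ---
# accelerate config            # interactive configuration
# accelerate env               # report the current environment
# accelerate launch train.py   # run a training script with the saved config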
| 30
| 1
|
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'char'
_snake_case : int = 'bpe'
_snake_case : Optional[Any] = 'wp'
UpperCAmelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = ['image_processor', 'char_tokenizer']
_snake_case : Tuple = 'ViTImageProcessor'
_snake_case : Any = 'MgpstrTokenizer'
def __init__( self : Optional[int] , __lowerCAmelCase : int=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCAmelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
_UpperCAmelCase = tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained("""gpt2""" )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self : Union[str, Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Any ):
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_UpperCAmelCase = self.char_tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sequences
_UpperCAmelCase = char_preds.size(0 )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """char""" )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """bpe""" )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """wp""" )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
_UpperCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
_UpperCAmelCase = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_UpperCAmelCase = {}
_UpperCAmelCase = final_strs
_UpperCAmelCase = final_scores
_UpperCAmelCase = char_strs
_UpperCAmelCase = bpe_strs
_UpperCAmelCase = wp_strs
return out
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
if format == DecodeType.CHARACTER:
_UpperCAmelCase = self.char_decode
_UpperCAmelCase = 1
_UpperCAmelCase = """[s]"""
elif format == DecodeType.BPE:
_UpperCAmelCase = self.bpe_decode
_UpperCAmelCase = 2
_UpperCAmelCase = """#"""
elif format == DecodeType.WORDPIECE:
_UpperCAmelCase = self.wp_decode
_UpperCAmelCase = 102
_UpperCAmelCase = """[SEP]"""
else:
raise ValueError(f'''Format {format} is not supported.''' )
_UpperCAmelCase , _UpperCAmelCase = [], []
_UpperCAmelCase = pred_logits.size(0 )
_UpperCAmelCase = pred_logits.size(1 )
_UpperCAmelCase , _UpperCAmelCase = pred_logits.topk(1 , dim=-1 , largest=__lowerCAmelCase , sorted=__lowerCAmelCase )
_UpperCAmelCase = preds_index.view(-1 , __lowerCAmelCase )[:, 1:]
_UpperCAmelCase = decoder(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = torch.nn.functional.softmax(__lowerCAmelCase , dim=2 ).max(dim=2 )
_UpperCAmelCase = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_UpperCAmelCase = preds_str[index].find(__lowerCAmelCase )
_UpperCAmelCase = preds_str[index][:pred_eos]
_UpperCAmelCase = preds_index[index].cpu().tolist()
_UpperCAmelCase = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_UpperCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
_UpperCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Dict ):
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
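# --- minimal sketch of the best-head selection above (made-up strings and
# confidence scores, not from the original file) ---
char_out, bpe_out, wp_out = ("ticket", 0.91), ("tlcket", 0.87), ("ticket", 0.95)
strs, scores = zip(char_out, bpe_out, wp_out)
best = strs[scores.index(max(scores))]  # -> "ticket": the wordpiece head wins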
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( input_str ):
    """
    Reverse the order of the words in ``input_str``.

    >>> __UpperCAmelCase("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
"""simple docstring"""
import os
# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def solution( ):
    """Count the triangular words in words.txt (Project Euler problem 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    wordfile_path = os.path.join(script_dir ,"""words.txt""" )
    words = """"""
    with open(wordfile_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
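# --- illustrative check (example word assumed, not read from words.txt) ---
# "SKY" -> 19 + 11 + 25 = 55 = 10 * 11 / 2, the 10th triangular number.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS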
| 30
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
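# --- usage note (illustrative) ---
# Fill in the four credential strings above before running; the v1.1
# user_timeline endpoint pages backwards 200 tweets per request and only
# exposes roughly the most recent 3200 tweets of an account.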
| 30
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a ( lowerCAmelCase_ ):
_snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
_snake_case : Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = v.to_dict()
return d
| 30
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
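# --- illustrative invocation (flag names follow PretokenizationArguments;
# the script name and values are assumed examples) ---
# python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#     --dataset_name codeparrot/codeparrot-clean-train \
#     --tokenized_data_repo tokenized-codeparrot-train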
| 30
| 1
|
"""simple docstring"""
def naive_cut_rod_recursive( n ,prices ):
    """Exponential-time recursive solution: try every length for the first cut."""
    _enforce_args(n ,prices )
    if n == 0:
        return 0
    max_revue = float("""-inf""" )
    for i in range(1 ,n + 1 ):
        max_revue = max(
            max_revue ,prices[i - 1] + naive_cut_rod_recursive(n - i ,prices ) )
    return max_revue
def top_down_cut_rod( n ,prices ):
    """Memoized top-down dynamic programming solution."""
    _enforce_args(n ,prices )
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n ,prices ,max_rev )
def _top_down_cut_rod_recursive( n ,prices ,max_rev ):
    """Helper for top_down_cut_rod; max_rev caches already-solved subproblems."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""" )
        for i in range(1 ,n + 1 ):
            max_revenue = max(
                max_revenue ,prices[i - 1] + _top_down_cut_rod_recursive(n - i ,prices ,max_rev ) ,)
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod( n ,prices ):
    """Iterative bottom-up dynamic programming solution."""
    _enforce_args(n ,prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 ,n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 ,i + 1 ):
            max_revenue_i = max(max_revenue_i ,prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n ,prices ):
    """Validate that n is non-negative and that every piece length has a price."""
    if n < 0:
        msg = f'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            f'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )
def main():
    """Verify that the three implementations agree on a known example."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n ,prices )
    max_rev_bottom_up = bottom_up_cut_rod(n ,prices )
    max_rev_naive = naive_cut_rod_recursive(n ,prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
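# --- illustrative check with the classic CLRS prices (assumed example) ---
# prices[i - 1] is the price of a piece of length i; for [1, 5, 8, 9] the
# best revenue for a rod of length 4 is 5 + 5 = 10 (two length-2 cuts).
assert bottom_up_cut_rod(4 ,[1, 5, 8, 9] ) == 10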
| 30
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
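# --- note (illustrative, not part of the original file) ---
# The dummy box above follows the LayoutLM convention: (x0, y0, x1, y1)
# corner coordinates normalised to a 0-1000 scale.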
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""ConditionalDetrFeatureExtractor"""]
UpperCAmelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
                    _UpperCAmelCase = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=np.float32 , )
                else:
                    _UpperCAmelCase = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
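# --- illustrative invocation (flag names follow the dataclass above; the
# script and file names are assumed) ---
# python plot_csv_file.py --csv_file results.csv --is_time --figure_png_file usage.png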
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( txt ):
    """
    Return every variant of ``txt`` with exactly one letter upper-cased.

    >>> __UpperCAmelCase("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 30
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
        return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 1
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class a ( lowerCAmelCase_ ):
_snake_case : Optional[int] = CustomTokenizer
pass
| 30
|
"""simple docstring"""
import string
from math import log10
def term_frequency( term ,document ):
    """Count case-insensitive occurrences of ``term`` in ``document``."""
    document_without_punctuation = document.translate(
        str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
    tokenize_document = document_without_punctuation.split(""" """ ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term ,corpus ):
    """Return (number of documents containing term, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df ,n ,smoothing=False ):
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) ,3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) ,3 )
def tf_idf( tf ,idf ):
    """Combine a term frequency and an inverse document frequency."""
return round(tf * idf ,3 )
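# --- worked example (numbers assumed) ---
# A term occurring 3 times in a document that appears in 1 of 2 corpus docs:
assert inverse_document_frequency(1 ,2 ) == 0.301
assert tf_idf(3 ,0.301 ) == 0.903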
| 30
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class a ( unittest.TestCase ):
def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Dict=99 , __lowerCAmelCase : Dict=32 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[Any]=37 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Dict=4 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase_ ( self : Tuple ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""albert-base-v2""" )
_UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCAmelCase )
@require_flax
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
_UpperCAmelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
_UpperCAmelCase = (1, 11, 768)
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
| 30
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path ,pytorch_dump_folder_path ):
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
            elif attribute in special_keys and hasattr(old_model ,"""in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model ,attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""BeitFeatureExtractor"""]
UpperCAmelCase__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], UpperCAmelCase__, module_spec=__spec__)
| 30
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
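    # Hedged sanity check of the polynomial product logic (coefficients are
    # stored lowest-degree first): direct convolution of (1 + 2x) and (3 + 4x)
    # gives 3 + 10x + 8x^2.
    print(np.convolve([1, 2], [3, 4]))  # [ 3 10  8]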
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["""modeling_roformer"""] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["""modeling_tf_roformer"""] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["""modeling_flax_roformer"""] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], UpperCAmelCase__, module_spec=__spec__)
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 30
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
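# Note (editorial): the layer built above shares its weight tensor with the
# embedding, i.e. the LM head returned for fine-tuned checkpoints is tied to
# the token embeddings.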
def __UpperCAmelCase ( lowercase ,lowercase="facebook/mbart-large-en-ro" ,lowercase=False ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = torch.load(lowercase ,map_location="""cpu""" )["""model"""]
remove_ignore_keys_(lowercase )
_UpperCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_UpperCAmelCase = MBartConfig.from_pretrained(lowercase ,vocab_size=lowercase )
if mbart_aa and finetuned:
_UpperCAmelCase = """relu"""
_UpperCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
_UpperCAmelCase = MBartForConditionalGeneration(lowercase )
model.model.load_state_dict(lowercase )
if finetuned:
_UpperCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
UpperCAmelCase__ = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 30
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number ,dice_number ):
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number ,max_face_number + 1 )
    for dice_numbers in product(faces_numbers ,repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution():
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 ,dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 ,dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total ,max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
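    # Tiny illustration of the frequency helper above: two 2-sided dice give
    # totals 2, 3, 4 with frequencies 1, 2, 1.
    print(total_frequency_distribution(sides_number=2 ,dice_number=2 ) ) # [0, 0, 1, 2, 1]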
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase__ = 1.6021E-19 # units = C
def carrier_concentration( conductivity ,electron_conc ,mobility ,):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
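    # Worked example (hedged): with electron_conc unknown (passed as 0), the
    # relation sigma = q * n * mu is solved above as n = sigma / (q * mu).
    print(carrier_concentration(conductivity=25 ,electron_conc=0 ,mobility=0.01 ) )
    # -> ('electron_conc', ~1.56e+22)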
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because both '''
                f'''`encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given.''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
    def get_encoder_config( self , encoder_config: PretrainedConfig ):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
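# Usage sketch (hedged; in transformers the top-level config class here is
# exported as VisionEncoderDecoderConfig):
#
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#       encoder_config=vit_config, decoder_config=gpt2_config
#   )
#
# The classmethod above sets is_decoder=True and add_cross_attention=True on
# the decoder config before combining the two sub-configs.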
| 30
| 1
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
| 30
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 30
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = ['input_features', 'attention_mask']
def __init__( self : Optional[Any] , __lowerCAmelCase : str=80 , __lowerCAmelCase : int=1_6000 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : int=10 , __lowerCAmelCase : List[str]=25 , __lowerCAmelCase : Any="hamming_window" , __lowerCAmelCase : List[str]=32_768.0 , __lowerCAmelCase : str=0.97 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Dict=False , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = feature_size
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = padding_value
_UpperCAmelCase = hop_length
_UpperCAmelCase = win_length
_UpperCAmelCase = frame_signal_scale
_UpperCAmelCase = preemphasis_coeff
_UpperCAmelCase = mel_floor
_UpperCAmelCase = normalize_means
_UpperCAmelCase = normalize_vars
_UpperCAmelCase = win_function
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = win_length * sampling_rate // 1000
_UpperCAmelCase = hop_length * sampling_rate // 1000
_UpperCAmelCase = optimal_fft_length(self.sample_size )
_UpperCAmelCase = (self.n_fft // 2) + 1
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : np.array ):
if self.win_function == "hamming_window":
_UpperCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase )
else:
_UpperCAmelCase = window_function(window_length=self.sample_size , name=self.win_function )
_UpperCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_UpperCAmelCase = spectrogram(
one_waveform * self.frame_signal_scale , window=__lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=__lowerCAmelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ):
# make sure we normalize float32 arrays
if self.normalize_means:
_UpperCAmelCase = x[:input_length].mean(axis=0 )
_UpperCAmelCase = np.subtract(__lowerCAmelCase , __lowerCAmelCase )
if self.normalize_vars:
_UpperCAmelCase = x[:input_length].std(axis=0 )
_UpperCAmelCase = np.divide(__lowerCAmelCase , __lowerCAmelCase )
if input_length < x.shape[0]:
_UpperCAmelCase = padding_value
# make sure array is in float32
_UpperCAmelCase = x.astype(np.floataa )
return x
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : Optional[np.ndarray] = None ):
_UpperCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__lowerCAmelCase , __lowerCAmelCase , self.padding_value ) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )]
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Optional[Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_UpperCAmelCase = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_UpperCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
_UpperCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase = [raw_speech]
# extract fbank features
_UpperCAmelCase = [self._extract_mfsc_features(__lowerCAmelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase = BatchFeature({"""input_features""": features} )
_UpperCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
# make sure list is in array format
_UpperCAmelCase = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __lowerCAmelCase ):
_UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
_UpperCAmelCase = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
_UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_UpperCAmelCase = (
np.array(__lowerCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_UpperCAmelCase = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase )
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase )
return padded_inputs
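# Worked numbers for the __init__ defaults above (hedged): win_length=25 ms and
# hop_length=10 ms at 16 kHz give
#   sample_size   = 25 * 16000 // 1000 = 400 samples
#   sample_stride = 10 * 16000 // 1000 = 160 samples
#   n_fft = optimal_fft_length(400) = 512, so n_freqs = 512 // 2 + 1 = 257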
| 30
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
    """simple docstring"""
    if not isinstance(objects ,list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size( exception ):
    """simple docstring"""
    _statements = [
        """CUDA out of memory.""", # CUDA OOM
        """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
        """DefaultCPUAllocator: can't allocate memory""", # CPU OOM
    ]
    if isinstance(exception ,RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size( function = None ,starting_batch_size = 128 ):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size ,starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args ,**kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called. '''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
        while True:
            if batch_size == 0:
                raise RuntimeError("""No executable batch size found, reached zero.""" )
            try:
                return function(batch_size ,*args ,**kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
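# Usage sketch for find_executable_batch_size above:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # the first positional argument is injected by the decorator
#
# On an OOM-style failure (see should_reduce_batch_size) the wrapper empties
# the device cache and retries with batch_size halved, until the call succeeds
# or batch_size reaches zero.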
| 30
| 1
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = 'masked_bert'
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=3_0522 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[int]="topK" , __lowerCAmelCase : Dict="constant" , __lowerCAmelCase : List[Any]=0.0 , **__lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
    def setUp( self ):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class a ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_0522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
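# A minimal, standalone illustration (not from the original test file) of the
# multiple-choice input expansion used in the tester above: each (batch, seq_len)
# tensor is tiled along a new axis so every answer candidate shares the same
# context, giving shape (batch, num_choices, seq_len).
def _demo_multiple_choice_expansion():
    batch, seq_len, num_choices = 2, 4, 3
    input_ids = tf.zeros((batch, seq_len) , dtype=tf.int32 )
    expanded = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, num_choices, 1) )
    assert expanded.shape == (batch, num_choices, seq_len)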
"""simple docstring"""
def power(base: int , exponent: int ) -> float:
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
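# A hypothetical companion (not in the original snippet): the linear recursion
# above performs `exponent` multiplications and hits Python's default recursion
# limit near exponent ~1000; squaring halves the exponent each step instead,
# giving O(log n) multiplications.
def fast_power(base: int, exponent: int) -> int:
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)
    return half * half if exponent % 2 == 0 else half * half * base


assert fast_power(2, 10) == power(2, 10) == 1024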
if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig( PretrainedConfig ):
    model_type = 'van'
    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class a ( unittest.TestCase ):
    @require_torch
    def test_small_model_pt( self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
            ] , )
        outputs = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ]
            * 5 , )
    @require_tf
    def test_small_model_tf( self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
        outputs = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ]
            * 5 , )
    @slow
    @require_torch
    def test_large_model_pt( self ):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ] , )
        outputs = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5 , )
    @slow
    @require_tf
    def test_large_model_tf( self ):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"""score""": 0.511, """label""": """remote"""},
                {"""score""": 0.485, """label""": """cat"""},
                {"""score""": 0.004, """label""": """plane"""},
            ] , )
        outputs = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [
                    {"""score""": 0.511, """label""": """remote"""},
                    {"""score""": 0.485, """label""": """cat"""},
                    {"""score""": 0.004, """label""": """plane"""},
                ],
            ]
            * 5 , )
"""simple docstring"""
def solution(power: int = 10_00 ) -> int:
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
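# A quick, hypothetical sanity check (not in the original file): 2**15 = 32768
# and 3 + 2 + 7 + 6 + 8 = 26, so the digit-sum loop should return 26.
assert solution(15) == 26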
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
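# A schematic, hypothetical re-implementation of the idea behind `_LazyModule`
# (the real class lives in `transformers.utils`, its exact code is not shown
# here): the submodule import is deferred until an attribute is first touched,
# which keeps top-level package imports cheap.
#
#   import importlib
#
#   class LazyAttrProxy:
#       def __init__(self, module_name):
#           self._module_name, self._module = module_name, None
#       def __getattr__(self, item):
#           if self._module is None:
#               self._module = importlib.import_module(self._module_name)
#           return getattr(self._module, item)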
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps: int ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_unispeech"""] = [
        """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """UniSpeechForCTC""",
        """UniSpeechForPreTraining""",
        """UniSpeechForSequenceClassification""",
        """UniSpeechModel""",
        """UniSpeechPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
    """processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
        """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GitForCausalLM""",
        """GitModel""",
        """GitPreTrainedModel""",
        """GitVisionModel""",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'} , )
    plot_along_batch: bool = field(
        default=False , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    is_time: bool = field(
        default=False , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    no_log_scale: bool = field(
        default=False , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    is_train: bool = field(
        default=False , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int(value ):
    """simple docstring"""
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float(value ):
    """simple docstring"""
    try:
        float(value )
        return True
    except ValueError:
        return False
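# Hypothetical quick checks for the two converters above (not in the original
# script): `int()` rejects a decimal string, `float()` rejects non-numeric text.
assert can_convert_to_int("""8""" ) and not can_convert_to_int("""8.5""" )
assert can_convert_to_float("""8.5""" ) and not can_convert_to_float("""n/a""" )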
class Plot:
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="""""" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
                if can_convert_to_int(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = int(row["""result"""] )
                elif can_convert_to_float(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = float(row["""result"""] )
    def plot( self ):
        fig, ax = plt.subplots()
        title_str = """Time usage""" if self.args.is_time else """Memory usage"""
        title_str = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""" )
            ax.set_yscale("""log""" )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
            results = self.result_dict[model_name]["""result"""]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , """--""" )
            title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = """Time in s""" if self.args.is_time else """Memory in MB"""
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    """simple docstring"""
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
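# Hypothetical usage note (column names inferred from the reader loop in
# `Plot.__init__`; the script and file names below are placeholders):
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.012
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png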
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
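# A tiny, hypothetical sanity check for `rename_key` (not in the original
# script): the timm key is popped and re-inserted under the HF name.
def _rename_key_demo():
    sd = {"""cls_token""": 1}
    rename_key(sd , """cls_token""" , """vit.embeddings.cls_token""" )
    assert sd == {"""vit.embeddings.cls_token""": 1}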
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("""Predicted class:""" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(f'''ybelkada/{vit_name}''' )
        processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
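# Hypothetical invocation (the script file name below is a placeholder):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid --push_to_hub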
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
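# A minimal, standalone illustration (separate from the test class above) of the
# JIT-vs-eager equivalence pattern exercised in `test_jit_compilation`: the
# traced function must produce the same shapes as the eager call.
def _demo_jit_vs_eager():
    import jax.numpy as jnp
    @jax.jit
    def affine(x ):
        return 2.0 * x + 1.0
    x = jnp.ones((2, 3) )
    with jax.disable_jit():
        eager = affine(x )
    assert affine(x ).shape == eager.shape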
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_0_9_6,
"""allenai/longformer-large-4096""": 4_0_9_6,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
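# A hypothetical quick check (not in the original file) of the two helpers
# above: every byte value gets a printable stand-in character, and `get_pairs`
# yields the adjacent-symbol pairs that `bpe_ranks` is keyed on.
def _demo_byte_level_helpers():
    assert len(bytes_to_unicode() ) == 256
    assert get_pairs(("""h""", """e""", """l""", """l""", """o""") ) == {
        ("""h""", """e"""), ("""e""", """l"""), ("""l""", """l"""), ("""l""", """o"""),
    }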
class LongformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False , **__lowerCAmelCase : Dict ):
_UpperCAmelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCAmelCase ) > 0 and not text[0].isspace()):
_UpperCAmelCase = """ """ + text
return (text, kwargs)
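# A minimal, self-contained sketch of the pair layout built above
# (cls + A + sep + sep + B + sep, i.e. <s> A </s></s> B </s>). The ids
# cls_id=0 and sep_id=2 are illustrative assumptions, not read from the
# tokenizer in this file.
def build_pair_sketch(token_ids_a, token_ids_b=None, cls_id=0, sep_id=2):
    if token_ids_b is None:
        return [cls_id] + token_ids_a + [sep_id]
    return [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]

assert build_pair_sketch([5, 6]) == [0, 5, 6, 2]
assert build_pair_sketch([5], [7]) == [0, 5, 2, 2, 7, 2]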
| 30
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
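# A minimal, self-contained sketch of the sub-command pattern above: each helper
# registers one sub-command on a shared subparsers object and attaches its handler
# through set_defaults(func=...), so main() can dispatch via args.func(args).
# The "greet" command below is illustrative, not part of the accelerate CLI.
from argparse import ArgumentParser

def greet_command_parser(subparsers):
    parser = subparsers.add_parser("greet", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

demo = ArgumentParser("demo", usage="demo <command> [<args>]")
greet_command_parser(demo.add_subparsers(help="demo command helpers"))
demo_args = demo.parse_args(["greet", "--name", "accelerate"])
demo_args.func(demo_args)  # prints: hello accelerate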
| 30
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'luke'
def __init__( self : Any , __lowerCAmelCase : str=5_0267 , __lowerCAmelCase : Dict=50_0000 , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Dict=512 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[Any]=2 , **__lowerCAmelCase : str , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = entity_vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = entity_emb_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_entity_aware_attention
_UpperCAmelCase = classifier_dropout
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
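# The one-liner above relies on str.split() with no argument, which splits on any
# run of whitespace, so repeated spaces are collapsed before the word order is
# reversed. For example:
assert " ".join("I  love Python".split()[::-1]) == "Python love I"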
| 30
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a ( unittest.TestCase ):
@property
def lowerCAmelCase_ ( self : Any ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__lowerCAmelCase , vqvae=__lowerCAmelCase , scheduler=__lowerCAmelCase )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__lowerCAmelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__lowerCAmelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCAmelCase = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 30
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
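# A minimal, self-contained sketch of the max_id pagination used above: each page
# starts just below the oldest id seen so far, so no tweet is fetched twice.
# fetch_page is a stand-in for the api.user_timeline call.
def paginate(fetch_page):
    seen, max_id = [], None
    while True:
        page = fetch_page(max_id)  # returns ids, newest first
        if not page:
            return seen
        seen.extend(page)
        max_id = page[-1] - 1  # next request starts below the oldest id seen

pages = {None: [9, 8], 7: [7, 6], 5: []}
assert paginate(lambda max_id: pages[max_id]) == [9, 8, 7, 6]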
| 30
| 1
|
"""simple docstring"""
UpperCAmelCase__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_5818,
}
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_UpperCAmelCase = (
f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
f'''Valid values are: {", ".join(lowercase )}'''
)
raise ValueError(lowercase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
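# A minimal sketch of the factor-table conversion above: every unit maps to its
# size in joules, so a value is first normalised to joules and then divided by
# the target unit's factor. With the factors from the table, 1 kWh is 3.6 MJ.
FACTORS = {"joule": 1.0, "kilowatthour": 3_600_000, "megajoule": 1_000_000}

def convert_energy(value, from_type, to_type):
    return value * FACTORS[from_type] / FACTORS[to_type]

assert convert_energy(1, "kilowatthour", "megajoule") == 3.6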
| 30
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : str = 'roberta-prelayernorm'
def __init__( self : str , __lowerCAmelCase : Optional[Any]=5_0265 , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : int=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Dict=3072 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : int=1e-1_2 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Union[str, Any]="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=None , **__lowerCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Dict ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 30
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
| 30
| 1
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
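# A minimal, self-contained sketch of the fused-projection split performed above:
# the old model stores query/key/value as one in_proj weight of shape
# (3 * embed_dim, embed_dim), which is sliced into three equal blocks along dim 0.
# The embed_dim below is illustrative.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)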
| 30
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
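# A minimal sketch of the log-log plotting pattern above: both axes use a log
# scale, while ScalarFormatter keeps plain numeric tick labels instead of the
# default 10^k notation.
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter

fig, ax = plt.subplots()
ax.plot([1, 10, 100], [2, 20, 200], "--")
ax.set_xscale("log")
ax.set_yscale("log")
for axis in [ax.xaxis, ax.yaxis]:
    axis.set_major_formatter(ScalarFormatter())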
| 30
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = IFInpaintingSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCAmelCase_ ( self : List[Any] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase_ ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase_ ( self : str ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase_ ( self : List[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase_ ( self : List[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase_ ( self : Tuple ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : List[str] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 30
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
        return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCAmelCase__ = logging.get_logger(__name__)
@dataclass
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Union[str, Any] , **__lowerCAmelCase : Optional[Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_UpperCAmelCase = deprecated_arg[3:]
setattr(self , __lowerCAmelCase , not kwargs.pop(__lowerCAmelCase ) )
logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
_UpperCAmelCase = kwargs.pop("""torchscript""" , self.torchscript )
_UpperCAmelCase = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
_UpperCAmelCase = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**__lowerCAmelCase )
_snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Trace the models using torchscript'} )
_snake_case : bool = field(default=lowerCAmelCase_ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
_snake_case : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def lowerCAmelCase_ ( self : int ):
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
_UpperCAmelCase = torch.device("""cpu""" )
_UpperCAmelCase = 0
elif is_torch_tpu_available():
_UpperCAmelCase = xm.xla_device()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
_UpperCAmelCase = torch.cuda.device_count()
return device, n_gpu
@property
def lowerCAmelCase_ ( self : str ):
return is_torch_tpu_available() and self.tpu
@property
def lowerCAmelCase_ ( self : List[Any] ):
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def lowerCAmelCase_ ( self : Tuple ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self.n_gpu > 0
| 30
|
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
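# A minimal worked example of the pipeline above, using the smoothed idf variant
# 1 + log10(n / (1 + df)): with n=10 documents, df=1 and a term frequency of 3,
# idf = round(1 + log10(10 / 2), 3) = 1.699 and tf-idf = round(3 * 1.699, 3) = 5.097.
from math import log10

n, df, tf = 10, 1, 3
idf = round(1 + log10(n / (1 + df)), 3)
assert round(tf * idf, 3) == 5.097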
| 30
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : int ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__lowerCAmelCase , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
_UpperCAmelCase = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCAmelCase , instance_count=__lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCAmelCase , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCAmelCase , py_version="""py36""" , )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : List[str] ):
TrainingJobAnalytics(__lowerCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[Any] ):
# create estimator
_UpperCAmelCase = self.create_estimator(__lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCAmelCase )
| 30
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30
| 1
|
"""simple docstring"""
import operator as op
UpperCAmelCase__ = """scaler.pt"""
UpperCAmelCase__ = """pytorch_model"""
UpperCAmelCase__ = """random_states"""
UpperCAmelCase__ = """optimizer"""
UpperCAmelCase__ = """scheduler"""
UpperCAmelCase__ = """pytorch_model.bin"""
UpperCAmelCase__ = """pytorch_model.bin.index.json"""
UpperCAmelCase__ = """model.safetensors"""
UpperCAmelCase__ = """model.safetensors.index.json"""
UpperCAmelCase__ = """1.10.2"""
UpperCAmelCase__ = """py38"""
UpperCAmelCase__ = """4.17.0"""
UpperCAmelCase__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
UpperCAmelCase__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
UpperCAmelCase__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
UpperCAmelCase__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
UpperCAmelCase__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
UpperCAmelCase__ = """2.0.1"""
UpperCAmelCase__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
UpperCAmelCase__ = ["""default""", """reduce-overhead""", """max-autotune"""]
UpperCAmelCase__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase__ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
UpperCAmelCase__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
UpperCAmelCase__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 30
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
        # Cooley-Tukey style butterfly: halve the number of columns on each pass
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
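# A minimal cross-check of the FFT-based product above using numpy's built-in FFT:
# pointwise-multiplying the zero-padded spectra and inverting gives the
# coefficients of (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2.
import numpy as np

a, b = [1, 2], [3, 4]
size = 4  # next power of two >= len(a) + len(b) - 1
product = np.fft.ifft(np.fft.fft(a, size) * np.fft.fft(b, size)).real.round(8)
assert list(product[:3]) == [3.0, 10.0, 8.0]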
| 30
| 1
|
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """decord""" )
self.check_model_type(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int=None , __lowerCAmelCase : int=None ):
_UpperCAmelCase = {}
if frame_sampling_rate is not None:
_UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
_UpperCAmelCase = num_frames
_UpperCAmelCase = {}
if top_k is not None:
_UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str]] , **__lowerCAmelCase : Any ):
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=1 ):
if num_frames is None:
_UpperCAmelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
_UpperCAmelCase = BytesIO(requests.get(__lowerCAmelCase ).content )
_UpperCAmelCase = VideoReader(__lowerCAmelCase )
videoreader.seek(0 )
_UpperCAmelCase = 0
_UpperCAmelCase = num_frames * frame_sampling_rate - 1
_UpperCAmelCase = np.linspace(__lowerCAmelCase , __lowerCAmelCase , num=__lowerCAmelCase , dtype=np.intaa )
_UpperCAmelCase = videoreader.get_batch(__lowerCAmelCase ).asnumpy()
_UpperCAmelCase = list(__lowerCAmelCase )
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : str ):
_UpperCAmelCase = self.model(**__lowerCAmelCase )
return model_outputs
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
_UpperCAmelCase , _UpperCAmelCase = probs.topk(__lowerCAmelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 30
| 1
|
"""simple docstring"""
class a :
def __init__( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None ):
_UpperCAmelCase = data
_UpperCAmelCase = previous
_UpperCAmelCase = next_node
def __str__( self : Optional[Any] ):
return f'''{self.data}'''
def lowerCAmelCase_ ( self : Any ):
return self.data
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.next
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.previous
class a :
def __init__( self : str , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = head
def __iter__( self : Dict ):
return self
def lowerCAmelCase_ ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
_UpperCAmelCase = self.current.get_data()
_UpperCAmelCase = self.current.get_next()
return value
class a :
def __init__( self : Tuple ):
_UpperCAmelCase = None # First node in list
_UpperCAmelCase = None # Last node in list
def __str__( self : Dict ):
_UpperCAmelCase = self.head
_UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
_UpperCAmelCase = current.get_next()
return " ".join(str(__lowerCAmelCase ) for node in nodes )
def __contains__( self : Optional[int] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
_UpperCAmelCase = current.get_next()
return False
def __iter__( self : str ):
return LinkedListIterator(self.head )
def lowerCAmelCase_ ( self : Any ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase_ ( self : str ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Node ):
if self.head is None:
_UpperCAmelCase = node
_UpperCAmelCase = node
else:
self.insert_before_node(self.head , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node ):
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = Node(__lowerCAmelCase )
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.set_tail(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.previous
if node.get_previous() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.next
if node.get_next() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = 1
_UpperCAmelCase = Node(__lowerCAmelCase )
_UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCAmelCase , __lowerCAmelCase )
return
current_position += 1
_UpperCAmelCase = node.next
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
_UpperCAmelCase = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] ):
if (node := self.get_node(__lowerCAmelCase )) is not None:
if node == self.head:
_UpperCAmelCase = self.head.get_next()
if node == self.tail:
_UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(__lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Node ):
if node.get_next():
_UpperCAmelCase = node.previous
if node.get_previous():
_UpperCAmelCase = node.next
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : Dict ):
return self.head is None
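# Hedged note (illustrative, not part of the original file): the list keeps
# explicit head and tail pointers, and insert_before_node / insert_after_node
# splice a new node in by rewiring the previous/next links on both neighbours,
# so every such insertion is O(1) once the anchor node is known; only the
# position-based insert and the value lookup walk the list in O(n).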
def __UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
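# Hedged note (illustrative, not part of the original file): this is Project
# Euler 205 -- Peter rolls nine 4-sided dice (totals 9..36) and Colin six
# 6-sided dice (totals 6..36); summing colin_totals_frequencies strictly below
# each of Peter's totals counts, out of (4**9) * (6**6) equally likely games,
# the ones Peter wins outright, giving a probability of about 0.5731441.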
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
class a :
def __init__( self : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = data
_UpperCAmelCase = None
_UpperCAmelCase = None
def __UpperCAmelCase ( lowercase ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __UpperCAmelCase ( ): # Main function for testing.
"""simple docstring"""
_UpperCAmelCase = Node(1 )
_UpperCAmelCase = Node(2 )
_UpperCAmelCase = Node(3 )
_UpperCAmelCase = Node(4 )
_UpperCAmelCase = Node(5 )
_UpperCAmelCase = Node(6 )
_UpperCAmelCase = Node(7 )
_UpperCAmelCase = Node(8 )
_UpperCAmelCase = Node(9 )
print(is_full_binary_tree(lowercase ) )
print(depth_of_tree(lowercase ) )
print("""Tree is: """ )
display(lowercase )
if __name__ == "__main__":
main()
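# Hedged note (illustrative, not part of the original file): is_full_binary_tree
# checks that every node has either zero or exactly two children, and
# depth_of_tree counts nodes along the longest root-to-leaf path (an empty tree
# has depth 0), while display prints the keys via an in-order traversal.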
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
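# Hedged note (illustrative, not part of the original file): the decoder ONNX
# config above builds its dummy inputs by reusing the text-model defaults, then
# renaming input_ids/attention_mask to decoder_input_ids/decoder_attention_mask
# and fabricating an all-zero encoder_hidden_states tensor of shape
# (batch, encoder_sequence, encoder_hidden_size) so the export can trace the
# cross-attention path without a real vision encoder in the loop.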
| 30
| 1
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCAmelCase__ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ = """PoolFormerConfig"""
# Base docstring
UpperCAmelCase__ = """sail/poolformer_s12"""
UpperCAmelCase__ = [1, 5_1_2, 7, 7]
# Image classification docstring
UpperCAmelCase__ = """sail/poolformer_s12"""
UpperCAmelCase__ = """tabby, tabby cat"""
UpperCAmelCase__ = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __UpperCAmelCase ( lowercase ,lowercase = 0.0 ,lowercase = False ):
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
_UpperCAmelCase = 1 - drop_prob
_UpperCAmelCase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_UpperCAmelCase = keep_prob + torch.rand(lowercase ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
_UpperCAmelCase = input.div(lowercase ) * random_tensor
return output
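# Hedged walkthrough (illustrative, not part of the original file): with
# drop_prob=0.1 the tensor above is keep_prob + U[0, 1), i.e. in [0.9, 1.9),
# which floors to 1 with probability 0.9 and to 0 otherwise; dividing the input
# by keep_prob before masking keeps the expected activation unchanged -- the
# usual stochastic-depth trick, so no rescaling is needed at inference time.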
class a ( nn.Module ):
def __init__( self : List[Any] , __lowerCAmelCase : Optional[float] = None ):
super().__init__()
_UpperCAmelCase = drop_prob
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : torch.Tensor ):
return drop_path(__lowerCAmelCase , self.drop_prob , self.training )
def lowerCAmelCase_ ( self : Optional[Any] ):
return "p={}".format(self.drop_prob )
class a ( nn.Module ):
def __init__( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : str=None ):
super().__init__()
_UpperCAmelCase = patch_size if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
_UpperCAmelCase = stride if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (stride, stride)
_UpperCAmelCase = padding if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (padding, padding)
_UpperCAmelCase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase )
_UpperCAmelCase = norm_layer(__lowerCAmelCase ) if norm_layer else nn.Identity()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = self.projection(__lowerCAmelCase )
_UpperCAmelCase = self.norm(__lowerCAmelCase )
return embeddings
class a ( nn.GroupNorm ):
def __init__( self : int , __lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
super().__init__(1 , __lowerCAmelCase , **__lowerCAmelCase )
class a ( nn.Module ):
def __init__( self : Tuple , __lowerCAmelCase : int ):
super().__init__()
_UpperCAmelCase = nn.AvgPoolad(__lowerCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict ):
return self.pool(__lowerCAmelCase ) - hidden_states
class a ( nn.Module ):
def __init__( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
super().__init__()
_UpperCAmelCase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
_UpperCAmelCase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
_UpperCAmelCase = PoolFormerDropPath(__lowerCAmelCase )
if isinstance(config.hidden_act , __lowerCAmelCase ):
_UpperCAmelCase = ACTaFN[config.hidden_act]
else:
_UpperCAmelCase = config.hidden_act
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.conva(__lowerCAmelCase )
_UpperCAmelCase = self.act_fn(__lowerCAmelCase )
_UpperCAmelCase = self.drop(__lowerCAmelCase )
_UpperCAmelCase = self.conva(__lowerCAmelCase )
_UpperCAmelCase = self.drop(__lowerCAmelCase )
return hidden_states
class a ( nn.Module ):
def __init__( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
super().__init__()
_UpperCAmelCase = PoolFormerPooling(__lowerCAmelCase )
_UpperCAmelCase = PoolFormerOutput(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = PoolFormerGroupNorm(__lowerCAmelCase )
_UpperCAmelCase = PoolFormerGroupNorm(__lowerCAmelCase )
# Useful for training neural nets
_UpperCAmelCase = PoolFormerDropPath(__lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
_UpperCAmelCase = config.use_layer_scale
if config.use_layer_scale:
_UpperCAmelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )
_UpperCAmelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : int ):
if self.use_layer_scale:
_UpperCAmelCase = self.pooling(self.before_norm(__lowerCAmelCase ) )
_UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_UpperCAmelCase = hidden_states + self.drop_path(__lowerCAmelCase )
_UpperCAmelCase = ()
_UpperCAmelCase = self.output(self.after_norm(__lowerCAmelCase ) )
_UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_UpperCAmelCase = hidden_states + self.drop_path(__lowerCAmelCase )
_UpperCAmelCase = (output,) + outputs
return outputs
else:
_UpperCAmelCase = self.drop_path(self.pooling(self.before_norm(__lowerCAmelCase ) ) )
# First residual connection
_UpperCAmelCase = pooling_output + hidden_states
_UpperCAmelCase = ()
# Second residual connection inside the PoolFormerOutput block
_UpperCAmelCase = self.drop_path(self.output(self.after_norm(__lowerCAmelCase ) ) )
_UpperCAmelCase = hidden_states + layer_output
_UpperCAmelCase = (output,) + outputs
return outputs
class a ( nn.Module ):
def __init__( self : Tuple , __lowerCAmelCase : List[Any] ):
super().__init__()
_UpperCAmelCase = config
# stochastic depth decay rule
_UpperCAmelCase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_UpperCAmelCase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_UpperCAmelCase = nn.ModuleList(__lowerCAmelCase )
# Transformer blocks
_UpperCAmelCase = []
_UpperCAmelCase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_UpperCAmelCase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__lowerCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__lowerCAmelCase ) )
_UpperCAmelCase = nn.ModuleList(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=True ):
_UpperCAmelCase = () if output_hidden_states else None
_UpperCAmelCase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_UpperCAmelCase , _UpperCAmelCase = layers
# Get patch embeddings from hidden_states
_UpperCAmelCase = embedding_layer(__lowerCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(__lowerCAmelCase ):
_UpperCAmelCase = blk(__lowerCAmelCase )
_UpperCAmelCase = layer_outputs[0]
if output_hidden_states:
_UpperCAmelCase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase )
class a ( lowerCAmelCase_ ):
_snake_case : str = PoolFormerConfig
_snake_case : Dict = 'poolformer'
_snake_case : List[str] = 'pixel_values'
_snake_case : str = True
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=False ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = value
UpperCAmelCase__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , lowerCAmelCase_ , )
class a ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , __lowerCAmelCase : Tuple ):
super().__init__(__lowerCAmelCase )
_UpperCAmelCase = config
_UpperCAmelCase = PoolFormerEncoder(__lowerCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase_ ( self : List[Any] ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ):
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_UpperCAmelCase = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
_UpperCAmelCase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class a ( nn.Module ):
def __init__( self : Dict , __lowerCAmelCase : str ):
super().__init__()
_UpperCAmelCase = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.dense(__lowerCAmelCase )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , lowerCAmelCase_ , )
class a ( lowerCAmelCase_ ):
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
super().__init__(__lowerCAmelCase )
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = PoolFormerModel(__lowerCAmelCase )
# Final norm
_UpperCAmelCase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_UpperCAmelCase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.LongTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.poolformer(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
_UpperCAmelCase = outputs[0]
_UpperCAmelCase = self.classifier(self.norm(__lowerCAmelCase ).mean([-2, -1] ) )
_UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase = """single_label_classification"""
else:
_UpperCAmelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
_UpperCAmelCase = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase = BCEWithLogitsLoss()
_UpperCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
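# Hedged note (illustrative, not part of the original file): the head above
# pools with a spatial mean over the last two axes, then infers problem_type
# from num_labels and the label dtype -- MSELoss for regression,
# CrossEntropyLoss for single-label classification, BCEWithLogitsLoss for
# multi-label -- the standard dispatch used by Transformers classification heads.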
| 30
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 30
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
def __init__( self : Any , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ):
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 30
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
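# Hedged usage sketch (illustrative, not part of the original file):
#   a, b = release_memory(a, b)
# rebinding to the returned Nones drops the last references, so gc.collect()
# plus the matching empty_cache() call can actually free the accelerator memory.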
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called. '''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
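# Hedged usage sketch (illustrative, not part of the original file):
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, optimizer): ...
# the wrapper injects batch_size as the first argument and, on an OOM-style
# failure recognised above, halves batch_size and retries until the call
# succeeds or the size reaches zero.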
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_UpperCAmelCase = result + left + right
return input_list
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if len(lowercase ) <= 1:
return input_list
_UpperCAmelCase = list(lowercase )
# iteration for two-way merging
_UpperCAmelCase = 2
while p <= len(lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(lowercase ) ,lowercase ):
_UpperCAmelCase = i
_UpperCAmelCase = i + p - 1
_UpperCAmelCase = (low + high + 1) // 2
_UpperCAmelCase = merge(lowercase ,lowercase ,lowercase ,lowercase )
# final merge of last two parts
if p * 2 >= len(lowercase ):
_UpperCAmelCase = i
_UpperCAmelCase = merge(lowercase ,0 ,lowercase ,len(lowercase ) - 1 )
break
p *= 2
return input_list
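# Hedged note (illustrative, not part of the original file): this is a
# bottom-up (iterative) merge sort -- the window length p doubles each pass
# (2, 4, 8, ...), every window [i, i + p - 1] is merged around
# mid = (low + high + 1) // 2, and the trailing partial merge stitches the
# final two runs together, for the usual O(n log n) comparisons overall.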
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
UpperCAmelCase__ = []
else:
UpperCAmelCase__ = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(lowercase ) ,lowercase )
return number - int(lowercase )
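# Hedged note (illustrative, not part of the original file): int() truncates
# toward zero, so the isolated fraction keeps the sign of the input (e.g. the
# -14.789 case below yields roughly -0.789), and digit_amount only controls
# optional rounding of that fraction.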
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 30
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
| 1
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase__ = 2
class a :
def __init__( self : List[Any] , *, # begin keyword-only arguments
__lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Optional[int]="<pad>" , __lowerCAmelCase : Any="</s>" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : Any=None , ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(__lowerCAmelCase )
_UpperCAmelCase = self.add_symbol(__lowerCAmelCase )
_UpperCAmelCase = self.add_symbol(__lowerCAmelCase )
_UpperCAmelCase = self.add_symbol(__lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__lowerCAmelCase )
_UpperCAmelCase = len(self.symbols )
def __eq__( self : str , __lowerCAmelCase : Optional[Any] ):
return self.indices == other.indices
def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : int ):
return len(self.symbols )
def __contains__( self : Optional[int] , __lowerCAmelCase : List[Any] ):
return sym in self.indices
@classmethod
def lowerCAmelCase_ ( cls : Dict , __lowerCAmelCase : str ):
_UpperCAmelCase = cls()
d.add_from_file(__lowerCAmelCase )
return d
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : Tuple=False ):
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(__lowerCAmelCase )
self.count.append(__lowerCAmelCase )
return idx
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[int] ):
return 0
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(__lowerCAmelCase ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(__lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(""" """ , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(__lowerCAmelCase )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(__lowerCAmelCase ) )
self.add_symbol(__lowerCAmelCase , n=__lowerCAmelCase , overwrite=__lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_UpperCAmelCase = dict((re.sub(R"""@@$""" ,"""""" ,lowercase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" ,"""</w>""" ,lowercase ), v) for k, v in d.items() )
_UpperCAmelCase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
_UpperCAmelCase = d[k] # restore
return da
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# prep
if not os.path.exists(lowercase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(lowercase ,exist_ok=lowercase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_UpperCAmelCase = os.path.join(lowercase ,"""checkpoint.pt""" )
if not os.path.isfile(lowercase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
_UpperCAmelCase = torch.load(lowercase ,map_location="""cpu""" )
_UpperCAmelCase = chkpt["""cfg"""]["""model"""]
# dicts
_UpperCAmelCase = os.path.join(lowercase ,"""dict.txt""" )
if not os.path.isfile(lowercase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
_UpperCAmelCase = Dictionary.load(lowercase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = os.path.join(lowercase ,VOCAB_FILES_NAMES["""vocab_file"""] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase ,ensure_ascii=lowercase ,indent=lowercase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(lowercase ,"""bpecodes""" )
if not os.path.isfile(lowercase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
_UpperCAmelCase = os.path.join(lowercase ,VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowercase ,lowercase )
# model config
_UpperCAmelCase = os.path.join(lowercase ,"""config.json""" )
_UpperCAmelCase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase ,ensure_ascii=lowercase ,indent=lowercase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(lowercase ,lowercase )
_UpperCAmelCase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 10_24,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase ,ensure_ascii=lowercase ,indent=lowercase ) )
# model
_UpperCAmelCase = chkpt["""model"""]
# remove unneeded keys
_UpperCAmelCase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowercase ,lowercase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
_UpperCAmelCase = model_state_dict.pop(lowercase )
else:
_UpperCAmelCase = model_state_dict.pop(lowercase )
_UpperCAmelCase = BioGptConfig.from_pretrained(lowercase )
_UpperCAmelCase = BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
_UpperCAmelCase = os.path.join(lowercase ,lowercase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(lowercase ,lowercase )
print("""Conversion is done!""" )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
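# Hedged note (illustrative, not part of the original file): this is Project
# Euler 16 -- the loop peels decimal digits off 2**power with repeated
# divmod-by-10 steps; for the default power of 1000 the digit sum is 1366.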
| 30
| 1
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class a ( lowerCAmelCase_ ):
def __init__( self : List[str] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Tuple ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(__lowerCAmelCase )
def __call__( self : List[Any] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Dict ):
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , **__lowerCAmelCase : int ):
return {}, {}, {}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = load_image(__lowerCAmelCase )
_UpperCAmelCase = image.size
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.model(**__lowerCAmelCase )
return model_outputs
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = model_outputs.predicted_depth
_UpperCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=__lowerCAmelCase )
_UpperCAmelCase = prediction.squeeze().cpu().numpy()
_UpperCAmelCase = (output * 255 / np.max(__lowerCAmelCase )).astype("""uint8""" )
_UpperCAmelCase = Image.fromarray(__lowerCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = predicted_depth
_UpperCAmelCase = depth
return output_dict
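# Hedged note (illustrative, not part of the original file): postprocessing
# above upsamples the predicted depth map back to the source resolution
# (PIL reports (width, height), hence the [::-1] flip for torch), rescales it
# to 0..255 uint8 for a viewable PIL image, and returns both the raw
# predicted_depth tensor and that visualisation.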
| 30
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
    def _get_train_sampler( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
    def compute_loss( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
    def prediction_step( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
    def _pad_tensors_to_max_len( self : Union[str, Any] , tensor : torch.Tensor , max_length : int ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
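# Usage sketch: picking a learning-rate schedule through the `arg_to_scheduler` mapping above,
# as the trainer does internally (the optimizer and step counts here are assumptions):
#   schedule_func = arg_to_scheduler["linear"]
#   lr_scheduler = schedule_func(optimizer, num_warmup_steps=500, num_training_steps=10_000)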
| 30
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
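# Lazy loading: the module is backed by a _LazyModule, so heavy submodules (e.g. the torch
# modeling code) are only imported when one of their attributes is first accessed, which
# keeps the initial `import transformers` fast.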
| 30
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Dict = 'open-llama'
def __init__( self : Tuple , __lowerCAmelCase : str=10_0000 , __lowerCAmelCase : List[Any]=4096 , __lowerCAmelCase : str=1_1008 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : List[Any]="silu" , __lowerCAmelCase : List[Any]=2048 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1e-6 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=True , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : str , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
"""use_memorry_efficient_attention""" , __lowerCAmelCase )
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , )
    def _rope_scaling_validation( self : Optional[Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_UpperCAmelCase = self.rope_scaling.get("""type""" , __lowerCAmelCase )
_UpperCAmelCase = self.rope_scaling.get("""factor""" , __lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
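# Usage sketch (OpenLlamaConfig is the de-obfuscated name of the config class above; the
# values are illustrative): linear RoPE scaling with factor 2.0 roughly doubles the usable context.
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})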
| 30
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
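    # Example invocation (script name and output path are illustrative):
    #   python convert_vit_hybrid_timm_to_pytorch.py \
    #       --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub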
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
def solution( max_base = 10 ,max_power = 22 ):
    """Count the positive integers below base max_base and power max_power that are both an n-digit number and an n-th power (Project Euler 63)."""
    bases = range(1 ,max_base )
    powers = range(1 ,max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
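# Worked example: 8**2 == 64 has exactly 2 digits, so it counts; 9**3 == 729 has 3 digits,
# so it counts too. 10**n always has n + 1 digits, which is why bases stop at max_base - 1.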
if __name__ == "__main__":
print(F'''{solution(1_0, 2_2) = }''')
| 30
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = "x" ,lowercase = 10**-10 ,lowercase = 1 ,):
"""simple docstring"""
_UpperCAmelCase = symbols(lowercase )
_UpperCAmelCase = lambdify(lowercase ,lowercase )
_UpperCAmelCase = lambdify(lowercase ,diff(lowercase ,lowercase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(lowercase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(lowercase ) / diff_function(
lowercase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
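        # The update above is the modified Newton iteration
        #   x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n),
        # which keeps quadratic convergence at roots of known multiplicity.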
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'''{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 30
|
"""simple docstring"""
def reverse_words( input_str ):
    """
    Reverse the word order of a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""-m""" ,"""--pretrained_model_name_or_path""" ,type=lowercase ,default=lowercase ,required=lowercase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,)
parser.add_argument(
"""-c""" ,"""--caption""" ,type=lowercase ,default="""robotic cat with wings""" ,help="""Text used to generate images.""" ,)
parser.add_argument(
"""-n""" ,"""--images_num""" ,type=lowercase ,default=4 ,help="""How much images to generate.""" ,)
parser.add_argument(
"""-s""" ,"""--seed""" ,type=lowercase ,default=42 ,help="""Seed for random process.""" ,)
parser.add_argument(
"""-ci""" ,"""--cuda_id""" ,type=lowercase ,default=0 ,help="""cuda_id.""" ,)
_UpperCAmelCase = parser.parse_args()
return args
def image_grid( imgs ,rows ,cols ):
    """Paste `rows * cols` equally sized images into a single grid image."""
    if not len(imgs ) == rows * cols:
        raise ValueError("""The specified number of rows and columns does not match the number of images.""" )
    w, h = imgs[0].size
    grid = Image.new("""RGB""" ,size=(cols * w, rows * h) )
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img ,box=(i % cols * w, i // cols * h) )
    return grid
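# e.g. image_grid(images, rows=2, cols=2) tiles four 512x512 outputs into one 1024x1024 sheet.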
def __UpperCAmelCase ( lowercase ,lowercase="robotic cat with wings" ,lowercase=7.5 ,lowercase=50 ,lowercase=1 ,lowercase=42 ,):
"""simple docstring"""
_UpperCAmelCase = torch.Generator(pipeline.device ).manual_seed(lowercase )
_UpperCAmelCase = pipeline(
lowercase ,guidance_scale=lowercase ,num_inference_steps=lowercase ,generator=lowercase ,num_images_per_prompt=lowercase ,).images
_UpperCAmelCase = int(math.sqrt(lowercase ) )
_UpperCAmelCase = image_grid(lowercase ,rows=_rows ,cols=num_images_per_prompt // _rows )
return grid, images
UpperCAmelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
UpperCAmelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
UpperCAmelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
UpperCAmelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
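# The lambda below stands in for the pipeline's safety checker: it returns the images
# unchanged and reports that nothing was flagged.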
UpperCAmelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
UpperCAmelCase__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, """unet""", unet)
else:
UpperCAmelCase__ = unet.to(torch.device("""cuda""", args.cuda_id))
UpperCAmelCase__ = pipeline.to(unet.device)
UpperCAmelCase__ , UpperCAmelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
UpperCAmelCase__ = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 30
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
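    # Note: the standard v1.1 user_timeline endpoint only exposes roughly the most recent
    # 3200 tweets of an account, fetched here in pages of 200.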
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
    with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ,newline="""""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30
| 1
|
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
if isinstance(lowercase ,lowercase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for d in reversed(lowercase ):
idx.append(flat_idx % d )
_UpperCAmelCase = flat_idx // d
return tuple(reversed(lowercase ) )
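# Worked example: the flat (row-major) index 5 in a 2x4 grid maps to (1, 1),
# since 5 % 4 == 1 and 5 // 4 == 1.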
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = None ,lowercase = None ,):
"""simple docstring"""
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowercase ) -> None:
_UpperCAmelCase = True
for i in range(len(lowercase ) ):
_UpperCAmelCase = -1 * (i + 1)
l[reversed_idx] &= tally
_UpperCAmelCase = l[reversed_idx]
if start_edges is None:
_UpperCAmelCase = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
_UpperCAmelCase = [e == (d - 1) for e, d in zip(lowercase ,lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
_UpperCAmelCase = []
_UpperCAmelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase ,lowercase ):
if s == e:
path_list.append(slice(lowercase ,s + 1 ) )
else:
break
_UpperCAmelCase = tuple(lowercase )
_UpperCAmelCase = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = start[divergence_idx]
return tuple(
path + (slice(lowercase ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = end[divergence_idx]
return tuple(
path + (slice(lowercase ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCAmelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = t.shape[:no_batch_dims]
_UpperCAmelCase = list(_flat_idx_to_idx(lowercase ,lowercase ) )
# _get_minimal_slice_set is inclusive
_UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 ,lowercase ) )
# Get an ordered list of slices to perform
_UpperCAmelCase = _get_minimal_slice_set(
lowercase ,lowercase ,lowercase ,)
_UpperCAmelCase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase = False ,lowercase = None ,lowercase = False ,):
"""simple docstring"""
if not (len(lowercase ) > 0):
raise ValueError("""Must provide at least one input""" )
_UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase )]
    _UpperCAmelCase = tuple([max(s ) for s in zip(*lowercase )] )
def _prep_inputs(lowercase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCAmelCase = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCAmelCase = tensor_tree_map(_prep_inputs ,lowercase )
_UpperCAmelCase = None
if _out is not None:
        _UpperCAmelCase = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
_UpperCAmelCase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCAmelCase = 0
_UpperCAmelCase = prepped_outputs
for _ in range(lowercase ):
# Chunk the input
if not low_mem:
_UpperCAmelCase = _select_chunk
else:
_UpperCAmelCase = partial(
_chunk_slice ,flat_start=lowercase ,flat_end=min(lowercase ,i + chunk_size ) ,no_batch_dims=len(lowercase ) ,)
_UpperCAmelCase = tensor_tree_map(lowercase ,lowercase )
# Run the layer on the chunk
_UpperCAmelCase = layer(**lowercase )
# Allocate space for the output
if out is None:
            _UpperCAmelCase = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,lowercase )
# Put the chunk in its pre-allocated space
if isinstance(lowercase ,lowercase ):
def assign(lowercase ,lowercase ) -> None:
for k, v in da.items():
if isinstance(lowercase ,lowercase ):
assign(lowercase ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_UpperCAmelCase = da[k]
assign(lowercase ,lowercase )
elif isinstance(lowercase ,lowercase ):
            for xa, xb in zip(lowercase ,lowercase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
elif isinstance(lowercase ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCAmelCase = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
    _UpperCAmelCase = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) ,lowercase )
return out
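# Usage sketch of the chunking helper above (the name `chunk_layer` and the inputs are
# assumptions): it trades peak memory for sequential passes over flattened batch slices.
#   out = chunk_layer(attention_fn, {"q": q, "k": k, "v": v}, chunk_size=4, no_batch_dims=1)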
class a :
def __init__( self : str , __lowerCAmelCase : int = 512 , ):
_UpperCAmelCase = max_chunk_size
_UpperCAmelCase = None
_UpperCAmelCase = None
    def _determine_favorable_chunk_size( self : Optional[Any] , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int ):
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_UpperCAmelCase = [c for c in candidates if c > min_chunk_size]
_UpperCAmelCase = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__lowerCAmelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*__lowerCAmelCase , chunk_size=__lowerCAmelCase )
return True
except RuntimeError:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = len(__lowerCAmelCase ) - 1
while i > min_viable_chunk_size_index:
_UpperCAmelCase = test_chunk_size(candidates[i] )
if not viable:
_UpperCAmelCase = (min_viable_chunk_size_index + i) // 2
else:
_UpperCAmelCase = i
_UpperCAmelCase = (i + len(__lowerCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self : str , __lowerCAmelCase : Iterable , __lowerCAmelCase : Iterable ):
        consistent = True
        for aa, ab in zip(__lowerCAmelCase , __lowerCAmelCase ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_values = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_values = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_values , ab_values )
            else:
                consistent &= aa == ab
        return consistent
    def tune_chunk_size( self : Optional[Any] , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int , ):
_UpperCAmelCase = True
        _UpperCAmelCase = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , __lowerCAmelCase , __lowerCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__lowerCAmelCase )
_UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , __lowerCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
_UpperCAmelCase = False
if not consistent:
_UpperCAmelCase = self._determine_favorable_chunk_size(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_UpperCAmelCase = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
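# The tuner above probes power-of-two chunk sizes and keeps the largest one
# whose trial run does not raise (e.g. on out-of-memory). A simplified sketch
# of that idea, using a plain binary search instead of the snippet's index
# arithmetic:
import math
def tune_chunk_size(is_viable, max_chunk_size=512, min_chunk_size=1):
    candidates = [2 ** l for l in range(int(math.log(max_chunk_size, 2)) + 1)]
    candidates = [min_chunk_size] + [c for c in candidates if c > min_chunk_size]
    best = min_chunk_size
    lo, hi = 0, len(candidates) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if is_viable(candidates[mid]):  # in practice: run fn(...) and catch RuntimeError
            best, lo = candidates[mid], mid + 1
        else:
            hi = mid - 1
    return best
# e.g. pretend anything up to 128 elements fits in memory:
assert tune_chunk_size(lambda c: c <= 128) == 128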
| 30
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
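# What the map function above records, shown on a single toy example; the
# model name is only an example and assumes it is available locally or via
# the Hub.
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("gpt2")
text = "def add(a, b):\n    return a + b\n"
ids = tok(text, truncation=True)["input_ids"]
print(len(text) / len(ids))  # characters per token; higher means denser text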
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(lowercase ,lowercase )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCAmelCase ( lowercase = 35 ):
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 ,order + 1 ):
for x_den in range(x_num + 1 ,order + 1 ):
for y_num in range(1 ,order + 1 ):
for y_den in range(y_num + 1 ,order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(lowercase ,lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
unique_s.add(lowercase )
# n=2
_UpperCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCAmelCase = x_den * x_den * y_den * y_den
if is_sq(lowercase ) and is_sq(lowercase ):
_UpperCAmelCase = int(sqrt(lowercase ) )
_UpperCAmelCase = int(sqrt(lowercase ) )
_UpperCAmelCase = gcd(lowercase ,lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
unique_s.add(lowercase )
# n=-1
_UpperCAmelCase = x_num * y_num
_UpperCAmelCase = x_den * y_num + x_num * y_den
_UpperCAmelCase = gcd(lowercase ,lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
unique_s.add(lowercase )
# n=2
_UpperCAmelCase = x_num * x_num * y_num * y_num
_UpperCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowercase ) and is_sq(lowercase ):
_UpperCAmelCase = int(sqrt(lowercase ) )
_UpperCAmelCase = int(sqrt(lowercase ) )
_UpperCAmelCase = gcd(lowercase ,lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
unique_s.add(lowercase )
for num, den in unique_s:
total += Fraction(lowercase ,lowercase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
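# The sum-and-reduce step used by add_three above, checked against Fraction
# on 1/2 + 1/3 + 1/6 = 1:
from fractions import Fraction
from math import gcd
x_num, x_den, y_num, y_den, z_num, z_den = 1, 2, 1, 3, 1, 6
top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top, bottom)
top, bottom = top // hcf, bottom // hcf
assert (top, bottom) == (1, 1)
assert Fraction(top, bottom) == Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6)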
| 30
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return number | (1 << position)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return number & ~(1 << position)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return number ^ (1 << position)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
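# The five helpers above, exercised on 0b0110 (= 6); explicit expressions are
# used here because the snippet binds all five functions to the same name.
number = 0b0110
assert number | (1 << 0) == 0b0111    # set bit 0
assert number & ~(1 << 1) == 0b0100   # clear bit 1
assert number ^ (1 << 2) == 0b0010    # flip bit 2
assert ((number >> 1) & 1) == 1       # bit 1 is set
assert int((number & (1 << 3)) != 0) == 0  # bit 3 is not set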
| 30
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
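# The reader above expects a CSV with at least the columns consumed in
# __init__; a minimal input file (values invented) could be produced like this:
import csv
rows = [
    {"model": "model-a", "batch_size": 8, "sequence_length": 128, "result": 1536},
    {"model": "model-a", "batch_size": 8, "sequence_length": 256, "result": 2048},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)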
| 30
| 1
|
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = DownBlockaD # noqa F405
_snake_case : Tuple = 'down'
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = ResnetDownsampleBlockaD # noqa F405
_snake_case : Union[str, Any] = 'down'
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = AttnDownBlockaD # noqa F405
_snake_case : Union[str, Any] = 'down'
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Tuple = CrossAttnDownBlockaD # noqa F405
_snake_case : Optional[Any] = 'down'
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = SimpleCrossAttnDownBlockaD # noqa F405
_snake_case : Dict = 'down'
@property
def lowerCAmelCase_ ( self : str ):
return super().get_dummy_input(include_encoder_hidden_states=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = SkipDownBlockaD # noqa F405
_snake_case : int = 'down'
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_skip_sample=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = AttnSkipDownBlockaD # noqa F405
_snake_case : Optional[Any] = 'down'
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_skip_sample=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Union[str, Any] = DownEncoderBlockaD # noqa F405
_snake_case : Any = 'down'
@property
def lowerCAmelCase_ ( self : Any ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = {
"""in_channels""": 32,
"""out_channels""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[Any] = AttnDownEncoderBlockaD # noqa F405
_snake_case : Dict = 'down'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = {
"""in_channels""": 32,
"""out_channels""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = UNetMidBlockaD # noqa F405
_snake_case : Union[str, Any] = 'mid'
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = UNetMidBlockaDCrossAttn # noqa F405
_snake_case : List[str] = 'mid'
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
_snake_case : List[Any] = 'mid'
@property
def lowerCAmelCase_ ( self : Dict ):
return super().get_dummy_input(include_encoder_hidden_states=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = UpBlockaD # noqa F405
_snake_case : Dict = 'up'
@property
def lowerCAmelCase_ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Tuple = ResnetUpsampleBlockaD # noqa F405
_snake_case : Optional[Any] = 'up'
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Union[str, Any] = CrossAttnUpBlockaD # noqa F405
_snake_case : Tuple = 'up'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = SimpleCrossAttnUpBlockaD # noqa F405
_snake_case : List[Any] = 'up'
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase , include_encoder_hidden_states=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = 32
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = AttnUpBlockaD # noqa F405
_snake_case : List[Any] = 'up'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = SkipUpBlockaD # noqa F405
_snake_case : Dict = 'up'
@property
def lowerCAmelCase_ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Any = AttnSkipUpBlockaD # noqa F405
_snake_case : Optional[Any] = 'up'
@property
def lowerCAmelCase_ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Tuple = UpDecoderBlockaD # noqa F405
_snake_case : str = 'up'
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = {"""in_channels""": 32, """out_channels""": 32}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(__lowerCAmelCase )
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_snake_case : Dict = 'up'
@property
def lowerCAmelCase_ ( self : Tuple ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {"""in_channels""": 32, """out_channels""": 32}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(__lowerCAmelCase )
| 30
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
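# How a metric regex from the definitions above is applied to a log line
# (the log line itself is an illustrative input):
import re
line = "train_runtime = 123.4"
match = re.search(r"train_runtime.*=\D*(.*?)$", line)
assert match is not None and match.group(1) == "123.4"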
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 ,lowercase ,lowercase ,lowercase )
move_disk(lowercase ,lowercase )
move_tower(height - 1 ,lowercase ,lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
print("""moving disk from""" ,lowercase ,"""to""" ,lowercase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = int(input("""Height of hanoi: """ ).strip() )
move_tower(lowercase ,"""A""" ,"""B""" ,"""C""" )
if __name__ == "__main__":
main()
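# The same recursion with explicit parameter names (mine, since the renaming
# above collapsed them), plus the three moves printed for height 2:
def hanoi(height, from_pole, to_pole, with_pole):
    if height >= 1:
        hanoi(height - 1, from_pole, with_pole, to_pole)
        print("moving disk from", from_pole, "to", to_pole)
        hanoi(height - 1, with_pole, to_pole, from_pole)
hanoi(2, "A", "B", "C")
# moving disk from A to C
# moving disk from A to B
# moving disk from C to B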
| 30
|
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
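# Worked numbers for the pipeline above: a term occurring twice in its
# document, appearing in 1 of 2 corpus documents, without smoothing:
from math import log10
tf = 2                              # term frequency in one document
n, df = 2, 1                        # corpus size and document frequency
idf = round(log10(n / df), 3)       # 0.301
assert round(tf * idf, 3) == 0.602  # tf-idf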
| 30
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p ,num + 1 ,lowercase ):
_UpperCAmelCase = False
p += 1
return [prime for prime in range(2 ,num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
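# Expected result for a small bound; the sieve is re-run inline so the check
# is self-contained:
limit = 30
flags = [True] * (limit + 1)
p = 2
while p * p <= limit:
    if flags[p]:
        for i in range(p * p, limit + 1, p):
            flags[i] = False
    p += 1
assert [n for n in range(2, limit + 1) if flags[n]] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]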
| 30
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
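# The q/k/v split performed for the special keys above, in isolation: a fused
# in-projection of shape (3*d, d) is sliced into three (d, d) blocks.
import torch
d = 4
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[2 * d :, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)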
| 30
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : str = 'xmod'
def __init__( self : Union[str, Any] , __lowerCAmelCase : List[str]=3_0522 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : str="absolute" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : int=("en_XX",) , __lowerCAmelCase : str=None , **__lowerCAmelCase : str , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = pre_norm
_UpperCAmelCase = adapter_reduction_factor
_UpperCAmelCase = adapter_layer_norm
_UpperCAmelCase = adapter_reuse_layer_norm
_UpperCAmelCase = ln_before_adapter
_UpperCAmelCase = list(__lowerCAmelCase )
_UpperCAmelCase = default_language
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 30
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
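# Sanity check for the product: (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2. A
# schoolbook convolution gives the coefficients the DFT path should match:
pa, pb = [1, 2], [3, 4]
out = [0] * (len(pa) + len(pb) - 1)
for i, ca in enumerate(pa):
    for j, cb in enumerate(pb):
        out[i + j] += ca * cb
assert out == [3, 10, 8]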
| 30
| 1
|
"""simple docstring"""
import numpy as np
def __UpperCAmelCase ( vector ,alpha ):
"""simple docstring"""
return np.where(vector > 0 ,vector ,(alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
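# The function above is the ELU activation. Its values at a few points, with
# alpha = 1 (output shown approximately):
import numpy as np
v = np.array([-1.0, 0.0, 2.0])
print(np.where(v > 0, v, 1.0 * (np.exp(v) - 1)))  # approx. [-0.632, 0.0, 2.0]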
| 30
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 30
| 1
|
"""simple docstring"""
import torch
from torch import nn
class a ( nn.Module ):
def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Optional[int]=False ):
super().__init__()
_UpperCAmelCase = n_token
_UpperCAmelCase = d_embed
_UpperCAmelCase = d_proj
_UpperCAmelCase = cutoffs + [n_token]
_UpperCAmelCase = [0] + self.cutoffs
_UpperCAmelCase = div_val
_UpperCAmelCase = self.cutoffs[0]
_UpperCAmelCase = len(self.cutoffs ) - 1
_UpperCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
_UpperCAmelCase = nn.ModuleList()
_UpperCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
_UpperCAmelCase = keep_order
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ):
if proj is None:
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
_UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=None , __lowerCAmelCase : List[str]=False ):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Any ):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
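# The identity the adaptive head above exploits: for a word in a tail cluster,
# log p(word) = log p(cluster | h) + log p(word | cluster, h). A tiny check
# that the combined distribution still sums to one (shapes are illustrative):
import torch
import torch.nn.functional as F
head_logprob = F.log_softmax(torch.randn(1, 4), dim=1)  # 3 shortlist words + 1 cluster
tail_logprob = F.log_softmax(torch.randn(1, 5), dim=1)  # 5 words inside the cluster
word_logprob = head_logprob[:, -1:] + tail_logprob       # broadcast over the tail
total = torch.cat([head_logprob[:, :-1], word_logprob], dim=1).exp().sum()
assert torch.allclose(total, torch.tensor(1.0), atol=1e-5)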
| 30
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
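# The same frequency-counting idea on a tiny case: two 2-sided dice produce
# totals {2: 1, 3: 2, 4: 1} across the four equally likely rolls.
from itertools import product
freq = [0] * (2 * 2 + 1)
for dice in product(range(1, 3), repeat=2):
    freq[sum(dice)] += 1
assert freq == [0, 0, 1, 2, 1]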
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
| 30
| 1
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
UpperCAmelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
UpperCAmelCase__ = concatenate_datasets
UpperCAmelCase__ = DownloadConfig
UpperCAmelCase__ = DownloadManager
UpperCAmelCase__ = DownloadMode
UpperCAmelCase__ = DownloadConfig
UpperCAmelCase__ = DownloadMode
UpperCAmelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 30
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 30
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
for param in module.parameters():
_UpperCAmelCase = False
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCAmelCase = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = plt.imshow(lowercase )
fig.axes.get_xaxis().set_visible(lowercase )
fig.axes.get_yaxis().set_visible(lowercase )
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = datetime.now()
_UpperCAmelCase = current_time.strftime("""%H:%M:%S""" )
return timestamp
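# Worked example (added, names hedged since the originals are obfuscated):
# assuming the helpers above are `get_timestamp` and `show_image`,
# `get_timestamp()` returns a wall-clock string such as "14:03:27", and
# `show_image(img)` renders an image via matplotlib with both axes hidden.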
| 30
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
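# Example (added, not in the original): a CUDA OOM typically surfaces as a
# RuntimeError whose single argument begins with "CUDA out of memory.", so
# the substring check above returns True for it. `should_reduce_batch_size`
# is the assumed un-obfuscated name of the function above.
#
# try:
#     torch.empty(10**13, device="cuda")  # deliberately over-allocate
# except RuntimeError as e:
#     assert should_reduce_batch_size(e)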
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f''' Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
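# Hedged usage sketch (added): assuming the decorator above is accelerate's
# `find_executable_batch_size`, it wraps a function whose first parameter is
# the batch size and halves it on every OOM until the call succeeds. `train`
# and `model` are hypothetical placeholders.
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size, model):
#     ...  # any allocation failure triggers a retry with batch_size // 2
#
# train(model)  # batch_size is injected by the decorator, not passed here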
| 30
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 1
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : Tuple = 'linear'
_snake_case : str = 'cosine'
_snake_case : int = 'cosine_with_restarts'
_snake_case : Tuple = 'polynomial'
_snake_case : Dict = 'constant'
_snake_case : Tuple = 'constant_with_warmup'
_snake_case : Dict = 'piecewise_constant'
def __UpperCAmelCase ( lowercase ,lowercase = -1 ):
"""simple docstring"""
return LambdaLR(lowercase ,lambda lowercase : 1 ,last_epoch=lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = -1 ):
"""simple docstring"""
def lr_lambda(lowercase ):
if current_step < num_warmup_steps:
return float(lowercase ) / float(max(1.0 ,lowercase ) )
return 1.0
return LambdaLR(lowercase ,lowercase ,last_epoch=lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = -1 ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
_UpperCAmelCase , _UpperCAmelCase = rule_str.split(""":""" )
_UpperCAmelCase = int(lowercase )
_UpperCAmelCase = float(lowercase )
_UpperCAmelCase = value
_UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(lowercase ,lowercase ):
def rule_func(lowercase ) -> float:
_UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_UpperCAmelCase = create_rules_function(lowercase ,lowercase )
return LambdaLR(lowercase ,lowercase ,last_epoch=lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=-1 ):
"""simple docstring"""
def lr_lambda(lowercase ):
if current_step < num_warmup_steps:
return float(lowercase ) / float(max(1 ,lowercase ) )
return max(
0.0 ,float(num_training_steps - current_step ) / float(max(1 ,num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowercase ,lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = 0.5 ,lowercase = -1 ):
"""simple docstring"""
def lr_lambda(lowercase ):
if current_step < num_warmup_steps:
return float(lowercase ) / float(max(1 ,lowercase ) )
_UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * float(lowercase ) * 2.0 * progress )) )
return LambdaLR(lowercase ,lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = 1 ,lowercase = -1 ):
"""simple docstring"""
def lr_lambda(lowercase ):
if current_step < num_warmup_steps:
return float(lowercase ) / float(max(1 ,lowercase ) )
_UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 ,num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 ,0.5 * (1.0 + math.cos(math.pi * ((float(lowercase ) * progress) % 1.0) )) )
return LambdaLR(lowercase ,lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=1E-7 ,lowercase=1.0 ,lowercase=-1 ):
"""simple docstring"""
_UpperCAmelCase = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(lowercase ):
if current_step < num_warmup_steps:
return float(lowercase ) / float(max(1 ,lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_UpperCAmelCase = lr_init - lr_end
_UpperCAmelCase = num_training_steps - num_warmup_steps
_UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
_UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowercase ,lowercase ,lowercase )
UpperCAmelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = None ,lowercase = None ,lowercase = None ,lowercase = 1 ,lowercase = 1.0 ,lowercase = -1 ,):
"""simple docstring"""
_UpperCAmelCase = SchedulerType(lowercase )
_UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowercase ,last_epoch=lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowercase ,step_rules=lowercase ,last_epoch=lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowercase ,num_warmup_steps=lowercase ,last_epoch=lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowercase ,num_warmup_steps=lowercase ,num_training_steps=lowercase ,num_cycles=lowercase ,last_epoch=lowercase ,)
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowercase ,num_warmup_steps=lowercase ,num_training_steps=lowercase ,power=lowercase ,last_epoch=lowercase ,)
return schedule_func(
lowercase ,num_warmup_steps=lowercase ,num_training_steps=lowercase ,last_epoch=lowercase )
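# Hedged usage sketch (added): assuming the factory above is the usual
# `get_scheduler`, a warmup-then-cosine schedule is obtained like this;
# `optimizer` stands for any torch.optim.Optimizer instance.
#
# scheduler = get_scheduler(
#     "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
# )
# for _ in range(10_000):
#     ...          # forward/backward, optimizer.step()
#     scheduler.step()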
| 30
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
| 30
| 1
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : int ):
debug_launcher(test_script.main )
def lowerCAmelCase_ ( self : List[str] ):
debug_launcher(test_ops.main )
| 30
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
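# Worked example (added): for power=15 the loop sums the digits of
# 2**15 = 32768, i.e. 3 + 2 + 7 + 6 + 8 = 26, so the function returns 26.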
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
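# Hedged usage sketch (added): assuming the un-obfuscated class name is
# `UperNetConfig`, instantiating it with no arguments builds the default
# ResNet backbone announced in the log message above.
#
# config = UperNetConfig()  # ResNet backbone with out_features stage1..stage4
# config.to_dict()["backbone_config"]["model_type"]  # -> "resnet"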
| 30
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
| 30
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a ( lowerCAmelCase_ ):
_snake_case : torch.FloatTensor
class a ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self : List[Any] , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : int = 20 , __lowerCAmelCase : int = 768 , __lowerCAmelCase : Optional[Any]=77 , __lowerCAmelCase : int=4 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : str = "silu" , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = "linear" , __lowerCAmelCase : Optional[str] = "prd" , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , ):
super().__init__()
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = attention_head_dim
_UpperCAmelCase = num_attention_heads * attention_head_dim
_UpperCAmelCase = additional_embeddings
_UpperCAmelCase = time_embed_dim or inner_dim
_UpperCAmelCase = embedding_proj_dim or embedding_dim
_UpperCAmelCase = clip_embed_dim or embedding_dim
_UpperCAmelCase = Timesteps(__lowerCAmelCase , __lowerCAmelCase , 0 )
_UpperCAmelCase = TimestepEmbedding(__lowerCAmelCase , __lowerCAmelCase , out_dim=__lowerCAmelCase , act_fn=__lowerCAmelCase )
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
if embedding_proj_norm_type is None:
_UpperCAmelCase = None
elif embedding_proj_norm_type == "layer":
_UpperCAmelCase = nn.LayerNorm(__lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
if encoder_hid_proj_type is None:
_UpperCAmelCase = None
elif encoder_hid_proj_type == "linear":
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_UpperCAmelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowerCAmelCase ) )
if added_emb_type == "prd":
_UpperCAmelCase = nn.Parameter(torch.zeros(1 , 1 , __lowerCAmelCase ) )
elif added_emb_type is None:
_UpperCAmelCase = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_UpperCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , activation_fn="""gelu""" , attention_bias=__lowerCAmelCase , )
for d in range(__lowerCAmelCase )
] )
if norm_in_type == "layer":
_UpperCAmelCase = nn.LayerNorm(__lowerCAmelCase )
elif norm_in_type is None:
_UpperCAmelCase = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
_UpperCAmelCase = nn.LayerNorm(__lowerCAmelCase )
_UpperCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
_UpperCAmelCase = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowerCAmelCase , persistent=__lowerCAmelCase )
_UpperCAmelCase = nn.Parameter(torch.zeros(1 , __lowerCAmelCase ) )
_UpperCAmelCase = nn.Parameter(torch.zeros(1 , __lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {}
def fn_recursive_add_processors(__lowerCAmelCase : str , __lowerCAmelCase : torch.nn.Module , __lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(__lowerCAmelCase , """set_processor""" ):
_UpperCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , __lowerCAmelCase , __lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return processors
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_UpperCAmelCase = len(self.attn_processors.keys() )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__lowerCAmelCase : str , __lowerCAmelCase : torch.nn.Module , __lowerCAmelCase : Any ):
if hasattr(__lowerCAmelCase , """set_processor""" ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
module.set_processor(__lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __lowerCAmelCase , __lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
self.set_attn_processor(AttnProcessor() )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[torch.Tensor, float, int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.BoolTensor] = None , __lowerCAmelCase : bool = True , ):
_UpperCAmelCase = hidden_states.shape[0]
_UpperCAmelCase = timestep
if not torch.is_tensor(__lowerCAmelCase ):
_UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase = timesteps * torch.ones(__lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase = self.time_proj(__lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_UpperCAmelCase = timesteps_projected.to(dtype=self.dtype )
_UpperCAmelCase = self.time_embedding(__lowerCAmelCase )
if self.embedding_proj_norm is not None:
_UpperCAmelCase = self.embedding_proj_norm(__lowerCAmelCase )
_UpperCAmelCase = self.embedding_proj(__lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_UpperCAmelCase = self.encoder_hidden_states_proj(__lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_UpperCAmelCase = self.proj_in(__lowerCAmelCase )
_UpperCAmelCase = self.positional_embedding.to(hidden_states.dtype )
_UpperCAmelCase = []
_UpperCAmelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_UpperCAmelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_UpperCAmelCase = hidden_states[:, None, :]
_UpperCAmelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_UpperCAmelCase = self.prd_embedding.to(hidden_states.dtype ).expand(__lowerCAmelCase , -1 , -1 )
additional_embeds.append(__lowerCAmelCase )
_UpperCAmelCase = torch.cat(
__lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_UpperCAmelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_UpperCAmelCase = F.pad(
__lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_UpperCAmelCase = hidden_states + positional_embeddings
if attention_mask is not None:
_UpperCAmelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
_UpperCAmelCase = F.pad(__lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
_UpperCAmelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_UpperCAmelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_UpperCAmelCase = self.norm_in(__lowerCAmelCase )
for block in self.transformer_blocks:
_UpperCAmelCase = block(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_UpperCAmelCase = self.norm_out(__lowerCAmelCase )
if self.prd_embedding is not None:
_UpperCAmelCase = hidden_states[:, -1]
else:
_UpperCAmelCase = hidden_states[:, additional_embeddings_len:]
_UpperCAmelCase = self.proj_to_clip_embeddings(__lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return EnvironmentCommand()
class a ( lowerCAmelCase_ ):
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : ArgumentParser ):
_UpperCAmelCase = parser.add_parser("""env""" )
download_parser.set_defaults(func=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = huggingface_hub.__version__
_UpperCAmelCase = """not installed"""
_UpperCAmelCase = """NA"""
if is_torch_available():
import torch
_UpperCAmelCase = torch.__version__
_UpperCAmelCase = torch.cuda.is_available()
_UpperCAmelCase = """not installed"""
if is_transformers_available():
import transformers
_UpperCAmelCase = transformers.__version__
_UpperCAmelCase = """not installed"""
if is_accelerate_available():
import accelerate
_UpperCAmelCase = accelerate.__version__
_UpperCAmelCase = """not installed"""
if is_xformers_available():
import xformers
_UpperCAmelCase = xformers.__version__
_UpperCAmelCase = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(__lowerCAmelCase ) )
return info
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Optional[Any] ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 30
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
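# Hedged sketch (toy hidden size, random weights -- not the real checkpoint):
# the row-wise split of a fused qkv projection into query/key/value blocks
# that the function above performs on the timm state dict.
def _demo_qkv_split(hidden_size=4):
    import torch

    qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # stacked [q; k; v]
    q_w = qkv_weight[:hidden_size, :]
    k_w = qkv_weight[hidden_size : 2 * hidden_size, :]
    v_w = qkv_weight[-hidden_size:, :]
    # re-stacking the three slices recovers the fused matrix exactly
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)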
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
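# Hedged sketch (toy dict, made-up key names): the pop-and-reassign move the
# rename helper above applies for every (src, dest) pair in the rename table.
def _demo_rename():
    demo_state = {"head.weight": 1, "head.bias": 2}
    table = [("head.weight", "classifier.weight"), ("head.bias", "classifier.bias")]
    for src, dest in table:
        demo_state[dest] = demo_state.pop(src)
    assert set(demo_state) == {"classifier.weight", "classifier.bias"}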
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
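# Hedged usage note (the script filename and output path are placeholders):
# a typical invocation of this conversion script would look like
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-384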
| 30
| 1
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class a :
_snake_case : float
_snake_case : TreeNode | None = None
_snake_case : TreeNode | None = None
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# Validation
def is_valid_tree(lowercase ) -> bool:
if node is None:
return True
if not isinstance(lowercase ,lowercase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowercase ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
lowercase ,lowercase ,lowercase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,lowercase ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,lowercase )
)
return is_binary_search_tree_recursive_check(lowercase ,-float("""inf""" ) ,float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
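# Hedged sketch with a self-contained node type (the dataclass above has its
# field names mangled by the dataset): the validator narrows a (low, high)
# bound at each step, the same recursive check implemented above.
def _demo_bst_check():
    from dataclasses import dataclass

    @dataclass
    class DemoNode:
        data: float
        left: "DemoNode | None" = None
        right: "DemoNode | None" = None

    def is_bst(node, low=float("-inf"), high=float("inf")):
        if node is None:
            return True
        return (
            low < node.data < high
            and is_bst(node.left, low, node.data)
            and is_bst(node.right, node.data, high)
        )

    assert is_bst(DemoNode(2.0, DemoNode(1.0), DemoNode(3.0)))
    assert not is_bst(DemoNode(2.0, DemoNode(3.0)))  # left child violates the bound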
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|