from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the unique prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True if all elements of the iterable are equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first run of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Project Euler 47: first member of the first run of n consecutive
    integers that each have n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
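# A quick sanity check for the helpers above; the expected values are the
# well-known Project Euler 47 facts, shown here as an illustrative (assumed)
# doctest rather than part of the original file:
#
#     >>> sorted(unique_prime_factors(644))   # 644 = 2^2 * 7 * 23
#     [2, 7, 23]
#     >>> run(3)   # first triple of consecutive integers with 3 distinct prime factors each
#     [644, 645, 646]
#     >>> solution(4)
#     134043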
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename map for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Rename map for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Rename map for the classification token of stage `idx`."""
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    """Rename map for the final layernorm and classifier head."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint into the Hugging Face CvT format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
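# A minimal sketch of calling the converter directly from Python instead of the
# CLI above; the checkpoint path and output directory are hypothetical placeholders:
#
#     convert_cvt_checkpoint(
#         cvt_model="cvt-w24",
#         image_size=384,
#         cvt_file_name="cvtmodels/CvT-w24-384x384-IN-22k.pth",
#         pytorch_dump_folder="./cvt-w24-hf",
#     )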
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)


from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
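# A minimal usage sketch for the arguments class above (assumes TensorFlow is
# installed; the model id is illustrative):
#
#     benchmark_args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], inference=True, sequence_lengths=[8], batch_sizes=[1]
#     )
#     print(benchmark_args.is_tpu)  # False unless a TPU cluster resolver is reachable
#     print(benchmark_args.n_gpu)   # number of visible GPUs (0 on CPU-only hosts)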
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list) -> int:
    """Plain recursion (no memoisation): side of the largest square of 1s in a binary matrix."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list) -> int:
    """Same recursion, memoised with a DP array so each cell is solved once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """
    Iterative bottom-up DP over an (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP keeping only the current and next rows (O(cols) extra space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
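# A quick cross-check of the four variants above; all should agree on this
# 3x3 example, whose largest all-ones square has side 2:
#
#     mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
#     assert largest_square_area_in_matrix_top_down_approach(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up(3, 3, mat) == 2
#     assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat) == 2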
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    """Read one token per line from the vocabulary file."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
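# A minimal usage sketch, assuming one of the hosted checkpoints listed above;
# the exact token output shown is an assumption, not verified here:
#
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tokenizer("MKTVRQ")["input_ids"]       # protein sequence -> token ids
#     print(tokenizer.convert_ids_to_tokens(ids))  # e.g. ['<cls>', 'M', 'K', 'T', 'V', 'R', 'Q', '<eos>']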
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()


import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers for the start and end logits.
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences, )
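# Usage sketch (not part of the test class): the add-tokens bookkeeping asserted
# above, run against the public checkpoint. Assumes `transformers` is installed
# and the `microsoft/speecht5_asr` checkpoint is reachable.
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer

    tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
    before = len(tok)
    num_added = tok.add_tokens(["aaaaa bbbbbb", "cccccccccdddddddd"])
    assert len(tok) == before + num_added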
| 347 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
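# Usage sketch (nothing is downloaded): build a small config and read the
# attributes defined above.
if __name__ == "__main__":
    config = Data2VecTextConfig(num_hidden_layers=2, hidden_size=128)
    print(config.model_type, config.hidden_size)  # data2vec-text 128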
| 175 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """simple docstring"""
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
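# Quick shape check for the head above (sizes are arbitrary; requires torch only).
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(2, 768))
    assert logits.shape == (2, 5)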
| 175 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A: List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = size if size is not None else {"""shortest_edge""": 256}
UpperCAmelCase : int = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
UpperCAmelCase : Optional[int] = do_resize
UpperCAmelCase : int = size
UpperCAmelCase : Any = resample
UpperCAmelCase : Dict = do_center_crop
UpperCAmelCase : Dict = crop_size
UpperCAmelCase : Tuple = do_rescale
UpperCAmelCase : Optional[Any] = rescale_factor
UpperCAmelCase : Any = do_normalize
UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase : int = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase : Tuple = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray:
'''simple docstring'''
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : str = size if size is not None else self.size
UpperCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = resample if resample is not None else self.resample
UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : int = image_std if image_std is not None else self.image_std
UpperCAmelCase : List[Any] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase : Optional[Any] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCAmelCase : Any = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCAmelCase : Dict = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCAmelCase : List[Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCAmelCase : str = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase : str = {"""pixel_values""": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Any = target_sizes.numpy()
UpperCAmelCase : Tuple = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : List[Any] = logits.argmax(dim=1 )
UpperCAmelCase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
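# Standalone sketch of the shortest-edge resize rule used by `resize` above:
# scale so the shorter side equals `shortest_edge` while keeping the aspect
# ratio. The library helper may round slightly differently; this is illustrative.
def _shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


assert _shortest_edge_size(300, 400, 256) == (256, 341)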
| 109 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray, ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
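# Minimal driver sketch for the scheduler above (a sketch, not a full sampling
# loop; assumes jax is installed and stands in for the denoiser with pure noise).
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10, shape=(4,))
    key = random.PRNGKey(0)
    sample = random.normal(key, (4,)) * scheduler.config.sigma_max
    sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, float(state.schedule[0]), key)
    print(sample_hat.shape, sigma_hat)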
| 336 | 0 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A_ (a__ ):
'''simple docstring'''
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
"""simple docstring"""
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase_ : Optional[int] = eval_examples
UpperCAmelCase_ : Any = post_process_function
UpperCAmelCase_ : int = quant_trainer_args
UpperCAmelCase_ : str = 128 # default number of calibration samples
def UpperCamelCase__ ( self , lowercase_=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset." )
UpperCAmelCase_ : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCAmelCase_ : Optional[int] = self._remove_unused_columns(_lowerCamelCase , description="Calibration" )
return DataLoader(
_lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_lowerCamelCase , )
def UpperCamelCase__ ( self , lowercase_=None ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.train_dataset if calib_dataset is None else calib_dataset
UpperCAmelCase_ : Optional[Any] = self.get_calib_dataloader(_lowerCamelCase )
UpperCAmelCase_ : str = self.model
quant_trainer.configure_model(_lowerCamelCase , self.quant_trainer_args , calib=_lowerCamelCase )
model.eval()
quant_trainer.enable_calibration(_lowerCamelCase )
logger.info("***** Running calibration *****" )
logger.info(F""" Num examples = {self.calib_num}""" )
logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(_lowerCamelCase ):
# Prediction step
UpperCAmelCase_ : Union[str, Any] = self.prediction_step(_lowerCamelCase , _lowerCamelCase , prediction_loss_only=_lowerCamelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_lowerCamelCase , self.quant_trainer_args )
UpperCAmelCase_ : int = model
def UpperCamelCase__ ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = "eval" ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_ : Optional[int] = self.get_eval_dataloader(_lowerCamelCase )
UpperCAmelCase_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Tuple = self.compute_metrics
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ : List[Any] = eval_loop(
_lowerCamelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , )
finally:
UpperCAmelCase_ : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCAmelCase_ : Dict = self.post_process_function(_lowerCamelCase , _lowerCamelCase , output.predictions )
UpperCAmelCase_ : Tuple = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
UpperCAmelCase_ : str = metrics.pop(_lowerCamelCase )
self.log(_lowerCamelCase )
else:
UpperCAmelCase_ : Tuple = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_ : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCamelCase )
return metrics
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" ):
"""simple docstring"""
UpperCAmelCase_ : int = self.get_test_dataloader(_lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Optional[int] = self.compute_metrics
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ : Any = eval_loop(
_lowerCamelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCamelCase , )
finally:
UpperCAmelCase_ : Optional[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_ : List[Any] = self.post_process_function(_lowerCamelCase , _lowerCamelCase , output.predictions , "predict" )
UpperCAmelCase_ : List[str] = self.compute_metrics(_lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
UpperCAmelCase_ : Any = metrics.pop(_lowerCamelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCamelCase )
def UpperCamelCase__ ( self , lowercase_="./" ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.eval_dataset
UpperCAmelCase_ : Any = self.get_eval_dataloader(_lowerCamelCase )
UpperCAmelCase_ : Any = next(iter(_lowerCamelCase ) )
# saving device - to make it consistent
UpperCAmelCase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
UpperCAmelCase_ : List[Any] = tuple(v.to(_lowerCamelCase ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
UpperCAmelCase_ : int = True
UpperCAmelCase_ : List[Any] = self.model.to(_lowerCamelCase )
model.eval()
model.float()
UpperCAmelCase_ : Dict = model.module if hasattr(_lowerCamelCase , "module" ) else model
quant_trainer.configure_model(_lowerCamelCase , self.quant_trainer_args )
UpperCAmelCase_ : Any = os.path.join(_lowerCamelCase , "model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
        axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , export_params=_lowerCamelCase , opset_version=13 , do_constant_folding=_lowerCamelCase , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=_lowerCamelCase , )
logger.info("onnx export finished" )
| 354 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """simple docstring"""
        return cls(backbone_config=backbone_config, **kwargs )
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
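# Round-trip sketch (no weights involved): serialize the config above and
# rebuild it from the resulting dictionary.
if __name__ == "__main__":
    config = DetrConfig(num_queries=50)
    restored = DetrConfig.from_dict(config.to_dict())
    assert restored.num_queries == 50 and restored.hidden_size == config.d_model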
| 23 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all n-grams of size `ngram_size` from `sentence`.

    >>> create_ngram("hello", 3)
    ['hel', 'ell', 'llo']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 146 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
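    # Example invocation (sketch; file and checkpoint paths are placeholders):
    #   python convert_lxmert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./lxmert_ckpt \
    #       --config_file ./lxmert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin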
| 146 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__A : int = "true"
def A_ ( snake_case_ : Tuple ,snake_case_ : Optional[Any]=8_2 ,snake_case_ : Optional[Any]=1_6 ):
'''simple docstring'''
    set_seed(42 )
UpperCamelCase : List[Any] = RegressionModel()
UpperCamelCase : Tuple = deepcopy(__A )
UpperCamelCase : Union[str, Any] = RegressionDataset(length=__A )
UpperCamelCase : int = DataLoader(__A ,batch_size=__A )
model.to(accelerator.device )
UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(__A ,__A )
return model, ddp_model, dataloader
def A_ ( snake_case_ : Accelerator ,snake_case_ : Dict=False ):
'''simple docstring'''
UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
UpperCamelCase : List[Any] = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
def tokenize_function(snake_case_ : List[Any] ):
UpperCamelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=__A ,max_length=__A )
return outputs
with accelerator.main_process_first():
UpperCamelCase : str = dataset.map(
__A ,batched=__A ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
UpperCamelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(snake_case_ : int ):
if use_longest:
return tokenizer.pad(__A ,padding="""longest""" ,return_tensors="""pt""" )
        return tokenizer.pad(__A ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
    return DataLoader(__A ,shuffle=__A ,collate_fn=__A ,batch_size=16 )
def A_ ( snake_case_ : List[str] ,snake_case_ : Tuple ):
'''simple docstring'''
UpperCamelCase : int = Accelerator(dispatch_batches=__A ,split_batches=__A )
UpperCamelCase : Optional[Any] = get_dataloader(__A ,not dispatch_batches )
UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=__A )
UpperCamelCase , UpperCamelCase : int = accelerator.prepare(__A ,__A )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def A_ ( snake_case_ : Any ,snake_case_ : Optional[Any] ,snake_case_ : str ):
'''simple docstring'''
UpperCamelCase : List[Any] = []
for batch in dataloader:
UpperCamelCase , UpperCamelCase : List[Any] = batch.values()
with torch.no_grad():
UpperCamelCase : str = model(__A )
UpperCamelCase , UpperCamelCase : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCamelCase , UpperCamelCase : Union[str, Any] = [], []
for logit, targ in logits_and_targets:
logits.append(__A )
targs.append(__A )
UpperCamelCase , UpperCamelCase : Union[str, Any] = torch.cat(__A ), torch.cat(__A )
return logits, targs
def A_ ( snake_case_ : Accelerator ,snake_case_ : Tuple=8_2 ,snake_case_ : Tuple=False ,snake_case_ : Tuple=False ,snake_case_ : Optional[Any]=1_6 ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = get_basic_setup(__A ,__A ,__A )
UpperCamelCase , UpperCamelCase : Dict = generate_predictions(__A ,__A ,__A )
assert (
len(__A ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__A )}'
def A_ ( snake_case_ : bool = False ,snake_case_ : bool = False ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = evaluate.load("""glue""" ,"""mrpc""" )
UpperCamelCase , UpperCamelCase : List[str] = get_mrpc_setup(__A ,__A )
# First do baseline
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = setup["""no"""]
model.to(__A )
model.eval()
for batch in dataloader:
batch.to(__A )
with torch.inference_mode():
UpperCamelCase : Tuple = model(**__A )
UpperCamelCase : Optional[int] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__A ,references=batch["""labels"""] )
UpperCamelCase : Optional[Any] = metric.compute()
# Then do distributed
UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCamelCase : List[Any] = model(**__A )
UpperCamelCase : str = outputs.logits.argmax(dim=-1 )
UpperCamelCase : Tuple = batch["""labels"""]
UpperCamelCase , UpperCamelCase : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__A ,references=__A )
UpperCamelCase : Tuple = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def A_ ( ):
'''simple docstring'''
UpperCamelCase : int = Accelerator(split_batches=__A ,dispatch_batches=__A )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(__A ,__A )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCamelCase : int = Accelerator(split_batches=__A ,dispatch_batches=__A )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
            test_torch_metrics(__A ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
UpperCamelCase : int = Accelerator()
    test_torch_metrics(__A ,512 )
accelerator.state._reset_state()
def A_ ( snake_case_ : str ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
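    # Typical launch for this test script (sketch; the file name is a placeholder):
    #   accelerate launch --num_processes 2 test_metrics.py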
| 357 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Union[str, Any] = 'EncodecFeatureExtractor'
lowercase : List[Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.feature_extractor
UpperCamelCase : Any = False
def a_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=SCREAMING_SNAKE_CASE_ , language=SCREAMING_SNAKE_CASE_ , no_timestamps=SCREAMING_SNAKE_CASE_ )
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = kwargs.pop("""audio""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = kwargs.pop("""sampling_rate""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = kwargs.pop("""text""" , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : Any = args[0]
UpperCamelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
UpperCamelCase : Optional[int] = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if audio is not None:
UpperCamelCase : str = self.feature_extractor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
UpperCamelCase : int = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
UpperCamelCase : Optional[Any] = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = kwargs.pop("""audio""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = kwargs.pop("""padding_mask""" , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : Optional[int] = args[0]
UpperCamelCase : Any = args[1:]
if audio_values is not None:
return self._decode_audio(SCREAMING_SNAKE_CASE_ , padding_mask=SCREAMING_SNAKE_CASE_ )
else:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Dict = to_numpy(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = audio_values.shape
if padding_mask is None:
return list(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = to_numpy(SCREAMING_SNAKE_CASE_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
UpperCamelCase : List[str] = seq_len - padding_mask.shape[-1]
UpperCamelCase : Optional[int] = 1 - self.feature_extractor.padding_value
UpperCamelCase : Any = np.pad(SCREAMING_SNAKE_CASE_ , ((0, 0), (0, difference)) , """constant""" , constant_values=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audio_values.tolist()
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
UpperCamelCase : Optional[Any] = sliced_audio.reshape(SCREAMING_SNAKE_CASE_ , -1 )
return audio_values
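# Pure-numpy sketch of the padding-mask slicing performed by `_decode_audio`
# above (the padding value 0.0 is an assumption matching Encodec's default;
# numpy is already imported at the top of this module).
if __name__ == "__main__":
    audio = np.arange(8, dtype=np.float32).reshape(1, 8)  # one mono waveform
    mask = np.array([1, 1, 1, 1, 1, 0, 0, 0])  # 1 marks real samples
    trimmed = audio[0][mask != 0.0]
    assert trimmed.tolist() == [0.0, 1.0, 2.0, 3.0, 4.0]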
| 27 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
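
# With the lazy structure above, the first attribute access triggers the real
# import, e.g. (sketch):
#   from transformers.models.xmod import XmodConfig  # resolved via _LazyModule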
| 26 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 255 | 0 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1] ) for i in split_kwds )}" )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds )
    logger.info(f"Finished {num_proc} processes" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects" )
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
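# Usage sketch: route datasets' num_proc work through joblib's Spark backend
# (requires the optional `joblibspark` package; `ds` and `fn` are placeholders):
#   with parallel_backend("spark"):
#       ds = ds.map(fn, num_proc=2)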
| 369 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
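# Spot checks: gamma(n) == (n-1)! for positive integers, so gamma(5) -> 24.0,
# and gamma(1.5) -> 0.5 * sqrt(pi) ≈ 0.8862269254527579.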
| 70 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 298 |
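# Standalone sketch of the fused-projection split performed by read_in_q_k_v
# above: PyTorch's MultiheadAttention stores one (3*d, d) in_proj matrix, and
# the converter slices it into query/key/value blocks (d = 256 in the script).
# Toy sizes below; the variable names are illustrative only.
import torch

d = 4
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
# Re-stacking the slices must reproduce the fused matrix exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)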
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298 | 1 |
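# Worked example of the Friedmann relation implemented above:
# H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL).
# When the relative densities sum to 1, the curvature term Ok vanishes and
# H(0) = H0. A sketch assuming `hubble_parameter` from the preceding snippet.
from math import isclose

h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,
    redshift=0,
)
assert isclose(h0, 68.3)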
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 144 |
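# Minimal sketch of the flag-and-black-out logic used by the checker above,
# with a dummy score tensor standing in for the p_head/w_head output. Purely
# illustrative; names and threshold are placeholders.
import numpy as np
import torch

scores = torch.tensor([0.1, 0.9, 0.3])
flagged = (scores.flatten() > 0.5).tolist()
images = [np.ones((8, 8, 3)) for _ in scores]
for idx, hit in enumerate(flagged):
    if hit:
        images[idx] = np.zeros(images[idx].shape)  # replace flagged image with black
assert flagged == [False, True, False] and images[1].sum() == 0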
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 144 | 1 |
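# Sketch of the parse_known_args pattern the launcher above depends on: flags
# it defines are consumed, and everything else is forwarded untouched to the
# example script. Standalone toy, unrelated to runhouse itself.
import argparse

toy = argparse.ArgumentParser()
toy.add_argument("--user", type=str, default="ubuntu")
known, passthrough = toy.parse_known_args(["--user", "alice", "--lr", "3e-5"])
assert known.user == "alice" and passthrough == ["--lr", "3e-5"]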
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_UpperCAmelCase = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container class for a _DataSet (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 140 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 140 | 1 |
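# Illustrative sketch of the old-prefix -> new-prefix key remapping that
# convert_resnet/convert_attention perform above: tensors are copied to a new
# key while their values stay untouched. Toy tensor; the prefixes follow the
# script's naming scheme but the example itself is hypothetical.
import torch

checkpoint = {"input_blocks.1.0.in_layers.0.weight": torch.ones(4)}
new_checkpoint = {}
old_prefix, new_prefix = "input_blocks.1.0", "down_blocks.0.resnets.0"
new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
assert torch.equal(new_checkpoint["down_blocks.0.resnets.0.norm1.weight"], torch.ones(4))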
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 279 |
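# The per-pixel loop above is equivalent to a vectorized `255 - img` on a
# uint8 array; a quick check of that equivalence without reading an image file.
import numpy as np

img = np.array([[[10, 20, 30], [0, 255, 128]]], dtype=np.uint8)
expected = np.array([[[245, 235, 225], [255, 0, 127]]], dtype=np.uint8)
assert np.array_equal(255 - img, expected)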
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 279 | 1 |
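# Sketch of the lazy-import idea behind _LazyModule above: map each submodule
# to the symbols it provides up front, and only import when an attribute is
# first accessed. Simplified stand-in; the class below is hypothetical, not
# the transformers implementation.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        # invert {module: [symbols]} into {symbol: module}
        self._where = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        module = importlib.import_module(self._where[name])  # deferred import on first use
        return getattr(module, name)

ns = LazyNamespace({"json": ["dumps"]})
assert ns.dumps({"a": 1}) == '{"a": 1}'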
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 278 |
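# Quick standalone use of the scheduler the tests above exercise: build an
# UnCLIPScheduler and check that set_timesteps produces the requested number
# of inference steps. Assumes diffusers is installed; shapes only, no values.
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
assert len(scheduler.timesteps) == 25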
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 1 |
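# Sanity sketch for the [0, 2, 1, 3] channel-group permutation used by the
# unfold helpers above: the reverse helper must undo the forward one exactly.
# Toy 1-D tensor with in_channel = 8 (four groups of two).
import torch

x = torch.arange(8.0)
forward = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
roundtrip = forward.reshape(2, 4)[:, [0, 2, 1, 3]].transpose(0, 1).reshape(8)
assert torch.equal(roundtrip, x)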
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
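    # --- Editor's note (illustrative addition): numeric spot-check that needs no
    # matplotlib; for a degree-2 curve the basis at t=0.5 is [0.25, 0.5, 0.25].
    _curve = BezierCurve([(1, 1), (2, 3), (4, 3)])
    assert _curve.basis_function(0) == [1.0, 0.0, 0.0]
    assert _curve.bezier_curve_function(0.5) == (2.25, 2.5)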
| 370 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
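# --- Editor's note (illustrative addition): assumed invocation, mirroring the
# torch.distributed.launch convention this helper is modeled on; the script path
# and flags below are placeholders:
#
#   python xla_spawn.py --num_cores 8 path/to/train.py --your --training --args
#
# The helper imports `path/to/train.py` as a module, patches sys.argv to include
# `--tpu_num_cores`, and spawns its `_mp_fn` once per TPU core.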
| 36 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
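# --- Editor's note (illustrative addition): the converters above copy weights by
# assigning into ``.data``; a minimal stand-alone demo of that pattern.
if __name__ == "__main__":
    _linear = torch.nn.Linear(2, 2)
    _new_weight = torch.zeros(2, 2)
    _linear.weight.data = _new_weight
    assert torch.equal(_linear.weight.data, _new_weight)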
| 197 | """simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _A :
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowercase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = inputs["""prompt"""]
lowercase = inputs["""generator"""]
lowercase = inputs["""num_inference_steps"""]
lowercase = inputs["""output_type"""]
if "image" in inputs:
lowercase = inputs["""image"""]
else:
lowercase = None
if "mask_image" in inputs:
lowercase = inputs["""mask_image"""]
else:
lowercase = None
if "original_image" in inputs:
lowercase = inputs["""original_image"""]
else:
lowercase = None
lowercase , lowercase = pipe.encode_prompt(__lowerCAmelCase )
# inputs with prompt converted to embeddings
lowercase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = inputs["""generator"""]
lowercase = inputs["""num_inference_steps"""]
lowercase = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowercase = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowercase = image
if mask_image is not None:
lowercase = mask_image
if original_image is not None:
lowercase = original_image
lowercase = pipe_loaded(**__lowerCAmelCase )[0]
lowercase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe_loaded(**__lowerCAmelCase )[0]
lowercase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
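# --- Editor's note (illustrative addition): the repeated torch.manual_seed(0)
# calls above pin each component's random init; a minimal reproducibility check.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    _first = torch.randn(2)
    torch.manual_seed(0)
    assert torch.equal(_first, torch.randn(2))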
| 197 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main) | 353 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
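# --- Editor's note (illustrative addition): toy sanity check of the renaming
# above; the key is invented but follows the mmsegmentation layout.
if __name__ == "__main__":
    _toy = rename_keys({"backbone.patch_embed1.proj.weight": torch.zeros(1)})
    assert list(_toy.keys()) == ["segformer.encoder.patch_embeddings.0.proj.weight"]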
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
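# --- Editor's note (illustrative addition): the slicing above splits a fused
# key/value matrix of shape (2 * hidden, hidden) into two (hidden, hidden) halves.
if __name__ == "__main__":
    _hidden = 4
    _kv = torch.randn(2 * _hidden, _hidden)
    _key, _value = _kv[:_hidden, :], _kv[_hidden:, :]
    assert _key.shape == _value.shape == (_hidden, _hidden)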
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
expected_slice = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
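# --- Editor's note (illustrative addition): the encoder-only branch above reduces
# classification to an argmax over the logits; minimal demo with made-up numbers.
if __name__ == "__main__":
    _logits = torch.tensor([[0.1, 2.0, -1.0]])
    assert _logits.argmax(-1).item() == 1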
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
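# --- Editor's note (illustrative addition, not part of the original module): a
# stripped-down sketch of the lazy-import idea behind _LazyModule; the class and
# names below are invented for the demo.
if __name__ == "__main__":
    import importlib

    class _LazyDemo:
        def __init__(self, name):
            self._name = name
            self._mod = None

        def __getattr__(self, attr):
            # the real import only happens on first attribute access
            if self._mod is None:
                self._mod = importlib.import_module(self._name)
            return getattr(self._mod, attr)

    _lazy_json = _LazyDemo("json")
    assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'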
| 175 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 175 | 1 |
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'{solution() = }')
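# --- Editor's note (illustrative addition): quick sanity checks; 233168 is the
# well-known answer for the n = 1000 case.
if __name__ == "__main__":
    assert solution(10) == 23  # 3 + 5 + 6 + 9
    assert solution() == 233168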
| 347 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
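# --- Editor's note (illustrative addition): the shard bookkeeping above is plain
# bytes-per-element arithmetic; ``dtype_byte_size`` agrees with element_size here.
if __name__ == "__main__":
    _t = torch.zeros(1000, dtype=torch.bfloat16)
    assert _t.numel() * _t.element_size() == 2000
    assert dtype_byte_size(_t.dtype) == 2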
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 347 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = mock.Mock()
A : str = 500
A : List[str] = {}
A : List[Any] = HTTPError
A : str = {}
# Download this model to make sure it's in the cache.
A : Tuple = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE ) as mock_head:
A : Dict = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : str = mock.Mock()
A : Optional[Any] = 500
A : str = {}
A : Tuple = HTTPError
A : str = {}
# Download this model to make sure it's in the cache.
A : str = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE ) as mock_head:
A : Union[str, Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
try:
A : Dict = tempfile.mktemp()
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , SCREAMING_SNAKE_CASE )
A : List[str] = AlbertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
finally:
os.remove(SCREAMING_SNAKE_CASE )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , SCREAMING_SNAKE_CASE )
A : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : int = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
"""simple docstring"""
A : List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A : str = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : List[Any] = BertTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
A : Optional[Any] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE , repo_id='''test-tokenizer''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = BertTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
A : List[Any] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : str = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
A : Dict = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
A : Any = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
A : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
A : str = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[str] = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Dict = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Any = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[str] = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = Trie()
A : Optional[int] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(SCREAMING_SNAKE_CASE , ['''AB''', '''C'''] )
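# --- Editor's note (illustrative addition): the tokenizer-trie behavior exercised
# above, as a stand-alone snippet.
if __name__ == "__main__":
    _trie = Trie()
    _trie.add("[CLS]")
    assert _trie.split("[CLS] Hello") == ["[CLS]", " Hello"]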
| 3 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
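# --- Editor's note (illustrative addition): quick check of the ONNX defaults
# defined above; assumes OnnxConfig can be instantiated directly with a config.
if __name__ == "__main__":
    _onnx_config = YolosOnnxConfig(YolosConfig())
    assert _onnx_config.default_onnx_opset == 12
    assert _onnx_config.atol_for_validation == 1e-4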
| 356 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 210 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) | 6 |
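# The command above relies on argparse sub-command dispatch: add_parser()
# registers the verb and set_defaults(func=...) wires it to a factory. A
# stripped-down, self-contained sketch of that pattern (names illustrative):
from argparse import ArgumentParser


def run_download(args):
    print(f"would download {args.model} into {args.cache_dir}")


parser = ArgumentParser()
subparsers = parser.add_subparsers()
download = subparsers.add_parser("download")
download.add_argument("model")
download.add_argument("--cache-dir", default=None)
download.set_defaults(func=run_download)

cli_args = parser.parse_args(["download", "bert-base-uncased"])
cli_args.func(cli_args)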
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
__a : str = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__a : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__a : Dict = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a )
__a : List[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : List[Any] = self.get_dummy_inputs(__a )
__a : Dict = sd_pipe(**__a ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
__a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
__a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : int = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
__a , torch_dtype=torch.float16 , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : int = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : str = StableDiffusionInpaintPipeline.from_pretrained(
__a , safety_checker=__a , scheduler=__a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : str = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , )
__a : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 27 | 0 |
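# The tests above seed randomness with a device-aware generator: MPS only
# accepts the global torch.manual_seed, while CPU/CUDA can use a local
# torch.Generator. A minimal sketch of that helper (assuming only torch):
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # reproducible across runs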
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        # assumption: the boolean flag set here for ADE checkpoints is the
        # batch-norm fusion flag of DPTConfig
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
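# Quick sanity check (a sketch) of the refinenet index flip used above:
# abs(layer_idx - 4) maps 4 to 0, 3 to 1, 2 to 2 and 1 to 3.
assert [abs(i - 4) for i in (4, 3, 2, 1)] == [0, 1, 2, 3]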
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
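# The slices above cut a fused (3 * hidden, hidden) qkv matrix into equal
# query/key/value thirds; a quick self-contained check of that layout
# (hidden size 4 chosen for illustration):
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden).reshape(3 * _hidden, _hidden)
assert _fused[:_hidden].shape == _fused[_hidden : 2 * _hidden].shape == _fused[-_hidden:].shape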
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
UpperCamelCase_ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you\'re pushing to the hub.""",
)
UpperCamelCase_ =parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 362 |
"""simple docstring"""
def a_ ( _lowercase , _lowercase ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
_UpperCamelCase : Optional[int] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowercase ) )
return round(_lowercase , ndigits=2 )
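# Worked example (a sketch): an initial -100 outlay followed by two inflows
# of 60 at a 10% discount rate gives -100 + 60/1.1 + 60/1.21, roughly 4.13.
assert net_present_value(0.10, [-100, 60, 60]) == 4.13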
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row using Pascal's rule, in O(n * r) time."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, sweep right to left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
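# Cross-check against the closed form (a sketch; math.comb needs Python 3.8+):
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252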
| 273 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
_lowercase: Optional[int] = StableDiffusionControlNetImgaImgPipeline
_lowercase: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowercase: str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase: Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_lowercase: Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[str] ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_lowerCAmelCase = CLIPTextModel(__snake_case )
_lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
def lowercase__ ( self : Optional[int] ) -> List[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : Tuple ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowercase__ ( self : Tuple ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
_lowercase: Any = StableDiffusionControlNetImgaImgPipeline
_lowercase: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowercase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase: Any = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
_lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__snake_case )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__snake_case )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_lowerCAmelCase = CLIPTextModel(__snake_case )
_lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] )
_lowerCAmelCase = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
def lowercase__ ( self : List[str] ) -> Dict:
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
_lowerCAmelCase = 10.0
_lowerCAmelCase = 4
_lowerCAmelCase = self.get_dummy_inputs(__snake_case )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__snake_case )[0]
_lowerCAmelCase = self.get_dummy_inputs(__snake_case )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_lowerCAmelCase = self.get_dummy_inputs(__snake_case )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_lowerCAmelCase = self.get_dummy_inputs(__snake_case )
_lowerCAmelCase = steps
_lowerCAmelCase = scale
_lowerCAmelCase = pipe(**__snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def lowercase__ ( self : int ) -> str:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : Optional[Any] ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowercase__ ( self : int ) -> str:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : Union[str, Any] ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> Any:
_lowerCAmelCase = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
_lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__snake_case , controlnet=__snake_case )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
_lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase = """evil space-punk bird"""
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_12, 5_12) )
_lowerCAmelCase = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_12, 5_12) )
_lowerCAmelCase = pipe(
__snake_case , __snake_case , control_image=__snake_case , generator=__snake_case , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
_lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 70 | 0 |
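# The tests above sweep control_guidance_start/end; conceptually a ControlNet
# only contributes while the current step fraction lies inside that window.
# A minimal sketch of the gating idea (not the actual diffusers internals):
def controlnet_scale(step, num_steps, start=0.0, end=1.0, scale=1.0):
    frac = step / max(num_steps - 1, 1)
    return scale if start <= frac <= end else 0.0


print([controlnet_scale(s, 4, start=0.25, end=0.75) for s in range(4)])  # [0.0, 1.0, 1.0, 0.0]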
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A =logging.get_logger(__name__)
class _snake_case ( SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase :List[Any] = ['pixel_values']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(**_SCREAMING_SNAKE_CASE)
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
UpperCAmelCase__ : int = get_size_dict(_SCREAMING_SNAKE_CASE)
UpperCAmelCase__ : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase__ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""")
UpperCAmelCase__ : str = do_resize
UpperCAmelCase__ : Tuple = size
UpperCAmelCase__ : Tuple = resample
UpperCAmelCase__ : Dict = do_center_crop
UpperCAmelCase__ : Any = crop_size
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE)
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
return resize(
_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : str = get_size_dict(_SCREAMING_SNAKE_CASE)
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCAmelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Tuple = resample if resample is not None else self.resample
UpperCAmelCase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase__ : int = get_size_dict(_SCREAMING_SNAKE_CASE)
UpperCAmelCase__ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""")
UpperCAmelCase__ : int = make_list_of_images(_SCREAMING_SNAKE_CASE)
if not valid_images(_SCREAMING_SNAKE_CASE):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE) for image in images]
if do_resize:
UpperCAmelCase__ : Optional[Any] = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE) for image in images]
if do_center_crop:
UpperCAmelCase__ : List[Any] = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE) for image in images]
if do_normalize:
UpperCAmelCase__ : List[str] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE) for image in images]
UpperCAmelCase__ : int = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for image in images]
UpperCAmelCase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE) | 364 |
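# The preprocess pipeline above runs resize, center-crop, rescale, normalize.
# A numpy sketch of the last two stages on a fake image (values illustrative):
import numpy as np

img = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
img = img * (1 / 255)  # rescale to [0, 1]
mean, std = np.array([0.5, 0.5, 0.5]), np.array([0.5, 0.5, 0.5])
img = (img - mean) / std  # per-channel normalize, roughly into [-1, 1]
print(img.shape, float(img.min()), float(img.max()))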
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main() | 283 | 0 |
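# The bot above keys every decision off day deltas between "now" and the
# issue timestamps. A minimal sketch of that gating (datetime only, no API):
from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=30)
created_at = datetime.utcnow() - timedelta(days=60)
needs_stale_label = (datetime.utcnow() - updated_at).days > 23 and (datetime.utcnow() - created_at).days >= 30
print(needs_stale_label)  # True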
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 144 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( snake_case__, unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
_UpperCAmelCase :List[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :List[str] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase :str = False
@property
def UpperCAmelCase__ ( self : Tuple ):
return 32
@property
def UpperCAmelCase__ ( self : List[Any] ):
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : int ):
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return 100
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] ={
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase_ : Union[str, Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self : Any ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : int =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[int] =self.dummy_unet
lowerCamelCase_ : Optional[Any] =self.dummy_movq
lowerCamelCase_ : Optional[Any] ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase_ : Optional[Any] =DDIMScheduler(**snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Any ="cpu"
lowerCamelCase_ : Dict =self.get_dummy_components()
lowerCamelCase_ : Dict =self.pipeline_class(**snake_case__ )
lowerCamelCase_ : str =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[Any] =pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCamelCase_ : Dict =output.images
lowerCamelCase_ : Dict =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCamelCase_ : List[str] =image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : Union[str, Any] =np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : List[Any] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
lowerCamelCase_ : Optional[int] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase_ : Optional[int] =init_image.resize((512, 512) )
lowerCamelCase_ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
lowerCamelCase_ : Any =torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
lowerCamelCase_ : Union[str, Any] =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase_ : str ="A robot, 4k photo"
lowerCamelCase_ : List[Any] =KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCamelCase_ : Any =KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
lowerCamelCase_ : List[str] =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Tuple =torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ , lowerCamelCase_ : Tuple =pipe_prior(
snake_case__ , image=snake_case__ , strength=0.85 , generator=snake_case__ , negative_prompt="" , ).to_tuple()
lowerCamelCase_ : str =pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
lowerCamelCase_ : Optional[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 144 | 1 |
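# The slow test converts a PIL hint image to a (1, C, H, W) float tensor in
# [0, 1] before passing it to the pipeline. A self-contained sketch of that
# conversion (an 8x8 image chosen for illustration):
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (8, 8), color=(128, 0, 255))
hint = torch.from_numpy(np.array(img)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)
print(hint.shape)  # torch.Size([1, 3, 8, 8])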
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper for the knight's tour."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight's tour on an n x n board."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
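# Example: the 1x1 board is trivially a complete tour, so the search returns
# immediately; sizes 2 through 4 raise ValueError since no open tour exists.
print(open_knight_tour(1))  # [[1]]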
| 370 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3)
    return u1 % m
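# Example: 7 * 6 = 42, which is 1 (mod 41), so the inverse of 7 mod 41 is 6.
assert mod_inverse(7, 41) == 6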
| 260 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Tuple = dct.pop(_UpperCamelCase )
snake_case_ : str = val
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case_ : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case_ : Dict = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
snake_case_ : Union[str, Any] = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
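            # the fused qkv matrix has shape (3*dim, dim): rows [0:dim] hold the query
            # projection, [dim:2*dim] the key, and the final dim rows the value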
snake_case_ : Optional[int] = in_proj_weight[:dim, :]
snake_case_ : str = in_proj_bias[: dim]
snake_case_ : int = in_proj_weight[
dim : dim * 2, :
]
snake_case_ : Any = in_proj_bias[
dim : dim * 2
]
snake_case_ : List[str] = in_proj_weight[
-dim :, :
]
snake_case_ : List[Any] = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
    # fmt: off
    snake_case_ : Any = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case_ : Dict = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
snake_case_ : Optional[Any] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
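        # the fused in_proj matrix has shape (3*hidden_size, hidden_size); the
        # slices below peel off the query, key and value projections in order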
snake_case_ : Union[str, Any] = in_proj_weight[: hidden_size, :]
        snake_case_ : Dict = in_proj_bias[: hidden_size]
snake_case_ : str = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case_ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
snake_case_ : int = in_proj_weight[-hidden_size :, :]
snake_case_ : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case_ : List[str] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
snake_case_ : List[str] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : str = in_proj_weight[: hidden_size, :]
        snake_case_ : List[str] = in_proj_bias[: hidden_size]
snake_case_ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case_ : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
snake_case_ : Any = in_proj_weight[-hidden_size :, :]
snake_case_ : Union[str, Any] = in_proj_bias[-hidden_size :]
# fmt: on
def lowerCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
snake_case_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ : Tuple = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ) -> int:
"""simple docstring"""
snake_case_ : Tuple = get_maskformer_config(_UpperCamelCase )
# load original state_dict
with open(_UpperCamelCase , '''rb''' ) as f:
snake_case_ : Optional[Any] = pickle.load(_UpperCamelCase )
snake_case_ : Optional[int] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
snake_case_ : str = create_rename_keys(_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
snake_case_ : str = torch.from_numpy(_UpperCamelCase )
# load 🤗 model
snake_case_ : Tuple = MaskFormerForInstanceSegmentation(_UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCamelCase , param.shape )
snake_case_ , snake_case_ : Tuple = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCamelCase ) == 0, f'''Unexpected keys: {unexpected_keys}'''
# verify results
snake_case_ : Optional[Any] = prepare_img()
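    # ignore_index marks pixels excluded from the loss; the value is dataset-specific
    # (65 for Mapillary Vistas, 65535 for Cityscapes, 255 otherwise)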
if "vistas" in model_name:
snake_case_ : List[str] = 65
elif "cityscapes" in model_name:
snake_case_ : Dict = 65_535
else:
snake_case_ : Optional[int] = 255
snake_case_ : Dict = True if '''ade''' in model_name else False
snake_case_ : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase )
snake_case_ : str = image_processor(_UpperCamelCase , return_tensors='''pt''' )
snake_case_ : Any = model(**_UpperCamelCase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
snake_case_ : Optional[int] = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '''--model_name''',
    default='''maskformer-swin-tiny-ade''',
    type=str,
    help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 279 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = SpeechTaTokenizer
lowerCamelCase_ : int = False
lowerCamelCase_ : Dict = True
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ )
snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
snake_case_ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = '''this is a test'''
snake_case_ : int = '''this is a test'''
return input_text, output_text
def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ )
snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
return text, ids
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = '''<pad>'''
snake_case_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__magic_name__ ) , 81 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : int = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) )
snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Dict = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) )
snake_case_ : Tuple = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ )
# fmt: off
self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
snake_case_ : List[Any] = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
| 279 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'canine'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=16384 , lowercase=16 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=0XE000 , lowercase=0XE001 , lowercase=4 , lowercase=4 , lowercase=8 , lowercase=16384 , lowercase=128 , **lowercase , ):
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
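        # CANINE tokenizes at the character level; its special tokens live in the
        # Unicode private-use area (0xE000 for [CLS]/BOS, 0xE001 for [SEP]/EOS)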
A_ : Any = max_position_embeddings
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : List[Any] = initializer_range
A_ : Optional[int] = type_vocab_size
A_ : Dict = layer_norm_eps
# Character config:
A_ : Optional[Any] = downsampling_rate
A_ : Optional[int] = upsampling_kernel_size
A_ : Optional[int] = num_hash_functions
A_ : int = num_hash_buckets
        A_ : Any = local_transformer_stride
 | 135 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def a ( lowerCamelCase__ ):
'''simple docstring'''
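    # Möbius function mu(n): +1 if n is square-free with an even number of prime
    # factors, -1 if square-free with an odd number, 0 if n has a squared factor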
A_ : List[Any] = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 135 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ = '''src/transformers'''
A_ = '''docs/source/en/tasks'''
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_snake_case : Optional[Any] = f.readlines()
# Find the start prompt.
_snake_case : Union[str, Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
_snake_case : Optional[int] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
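    # strip blank lines from the boundaries of the extracted block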
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A_ = direct_transformers_import(TRANSFORMERS_PATH)
A_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
_snake_case : Optional[int] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
_snake_case : Dict = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Optional[int]=False ):
"""simple docstring"""
_snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
_snake_case : Optional[int] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
""" to fix this.""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 64 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 36 | 0 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__UpperCamelCase =Vector()
def UpperCAmelCase_ ( self : List[str] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase__ ) , '''(0,0,0,0,0,1)''' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase__ ) , 4 )
def UpperCAmelCase_ ( self : Optional[int] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2] )
__UpperCamelCase =Vector([1, 2, 3, 4, 5] )
__UpperCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__UpperCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def UpperCAmelCase_ ( self : str ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase_ ( self : int ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase_ ( self : Tuple ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([2, -1, 4] ) # for test of dot product
__UpperCamelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def UpperCAmelCase_ ( self : str ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase__ , UpperCamelCase__ ) ) , '''(3,4,7)''' )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 0, 0, 0, 0, 0] )
__UpperCamelCase =x.copy()
self.assertEqual(str(UpperCamelCase__ ) , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase__ ) , '''(0,1,0)''' )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase__ , UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase__ , UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCAmelCase_ ( self : int ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__UpperCamelCase =Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
 | 85 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowercase = logging.getLogger(__name__)
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=__UpperCamelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=__UpperCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=__UpperCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=__UpperCamelCase , default=1_0_0_0 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=__UpperCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=__UpperCamelCase , default=5_1_2 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=__UpperCamelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__UpperCamelCase =parser.parse_args()
return args
def lowerCAmelCase (__UpperCamelCase : Tuple ):
"""simple docstring"""
def fn(__UpperCamelCase : Union[str, Any] ):
return tokenizer(examples['''text'''] )
return fn
def lowerCAmelCase (__UpperCamelCase : str ):
"""simple docstring"""
__UpperCamelCase =[]
for i in range(len(tokenized_data['''input_ids'''] ) ):
__UpperCamelCase ={
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__UpperCamelCase =tf.train.Features(feature=__UpperCamelCase )
__UpperCamelCase =tf.train.Example(features=__UpperCamelCase )
__UpperCamelCase =example.SerializeToString()
records.append(__UpperCamelCase )
return records
def lowerCAmelCase (__UpperCamelCase : Optional[int] ):
"""simple docstring"""
__UpperCamelCase =datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__UpperCamelCase =min(len(__UpperCamelCase ) , args.limit )
__UpperCamelCase =dataset.select(range(__UpperCamelCase ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
__UpperCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__UpperCamelCase =os.path.join(args.output_dir , args.split )
if not os.path.exists(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
else:
__UpperCamelCase =os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__UpperCamelCase =tokenize_function(__UpperCamelCase )
__UpperCamelCase =dataset.map(__UpperCamelCase , batched=__UpperCamelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__UpperCamelCase : Union[str, Any] ):
# Concatenate all texts.
__UpperCamelCase ={k: sum(examples[k] , [] ) for k in examples.keys()}
__UpperCamelCase =len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__UpperCamelCase =(total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__UpperCamelCase ={
k: [t[i : i + args.max_length] for i in range(0 , __UpperCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__UpperCamelCase =dataset_tokenized.map(__UpperCamelCase , batched=__UpperCamelCase , batch_size=1_0_0_0 , num_proc=4 )
__UpperCamelCase =0
__UpperCamelCase =0
for shard in range(0 , len(__UpperCamelCase ) , args.shard_size ):
__UpperCamelCase =grouped_dataset[shard : shard + args.shard_size]
__UpperCamelCase =len(dataset_snapshot['''input_ids'''] )
__UpperCamelCase =os.path.join(__UpperCamelCase , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
__UpperCamelCase =get_serialized_examples(__UpperCamelCase )
with tf.io.TFRecordWriter(__UpperCamelCase ) as out_file:
for i in range(len(__UpperCamelCase ) ):
__UpperCamelCase =serialized_examples[i]
out_file.write(__UpperCamelCase )
print('''Wrote file {} containing {} records'''.format(__UpperCamelCase , __UpperCamelCase ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=__UpperCamelCase )
if __name__ == "__main__":
__lowercase = parse_args()
main(args)
| 85 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
UpperCAmelCase_ : Any = 50 # max width of layer names
UpperCAmelCase_ : List[Any] = 70 # max width of quantizer names
def _A (__a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=__a , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=__a , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=__a , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=__a , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=__a , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=__a , type=__a , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=__a , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _A (__a ) -> List[str]:
"""simple docstring"""
if args.calibrator == "max":
SCREAMING_SNAKE_CASE_ : Optional[int] = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''histogram'''
elif args.calibrator == "mse":
SCREAMING_SNAKE_CASE_ : List[str] = '''histogram'''
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
SCREAMING_SNAKE_CASE_ : List[str] = QuantDescriptor(num_bits=args.aprec , calib_method=__a )
SCREAMING_SNAKE_CASE_ : Dict = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__a )
quant_nn.QuantLinear.set_default_quant_desc_weight(__a )
def _A (__a , __a , __a=False , __a=False ) -> Optional[Any]:
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__a , ['''embeddings'''] , which='''weight''' , _disabled=__a )
if args.quant_disable:
set_quantizer_by_name(__a , [''''''] , _disabled=__a )
if args.quant_disable_keyword:
set_quantizer_by_name(__a , args.quant_disable_keyword , _disabled=__a )
if args.quant_disable_layer_module:
set_quantizer_by_name(__a , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=__a )
if args.quant_enable_layer_module:
set_quantizer_by_name(__a , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=__a )
if args.recalibrate_weights:
recalibrate_weights(__a )
if args.fuse_qkv:
fuse_qkv(__a , __a )
if args.clip_gelu:
clip_gelu(__a , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__a )
def _A (__a ) -> int:
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def _A (__a , __a ) -> Optional[int]:
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__a )
def _A (__a , __a ) -> Optional[int]:
"""simple docstring"""
def fusea(__a , __a , __a ):
for mod in [qq, qk, qv]:
if not hasattr(__a , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
SCREAMING_SNAKE_CASE_ : List[Any] = qq._amax.detach().item()
SCREAMING_SNAKE_CASE_ : Dict = qk._amax.detach().item()
SCREAMING_SNAKE_CASE_ : List[str] = qv._amax.detach().item()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(__a , __a , __a )
qq._amax.fill_(__a )
qk._amax.fill_(__a )
qv._amax.fill_(__a )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _A (__a , __a ) -> List[str]:
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def _A (__a ) -> Dict:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(__a , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mod.weight.shape[0]
SCREAMING_SNAKE_CASE_ : Tuple = mod._weight_quantizer._amax.detach()
SCREAMING_SNAKE_CASE_ : str = torch.ones(__a , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def _A (__a ) -> List[Any]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(__a , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
SCREAMING_SNAKE_CASE_ : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
SCREAMING_SNAKE_CASE_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
SCREAMING_SNAKE_CASE_ : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__a , keepdims=__a ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
SCREAMING_SNAKE_CASE_ : Dict = amax
def _A (__a , __a=25 , __a=1_80 , __a=None ) -> List[str]:
"""simple docstring"""
if ignore is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
elif not isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : int = [ignore]
SCREAMING_SNAKE_CASE_ : List[Any] = 0
for name, mod in model.named_modules():
if not hasattr(__a , '''weight''' ):
continue
SCREAMING_SNAKE_CASE_ : List[str] = max(__a , len(__a ) )
for name, mod in model.named_modules():
SCREAMING_SNAKE_CASE_ : int = getattr(__a , '''_input_quantizer''' , __a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(__a , '''_weight_quantizer''' , __a )
if not hasattr(__a , '''weight''' ):
continue
if type(__a ) in ignore:
continue
if [True for s in ignore if type(__a ) is str and s in name]:
continue
SCREAMING_SNAKE_CASE_ : List[str] = f'Act:{input_q.extra_repr()}'
SCREAMING_SNAKE_CASE_ : Tuple = f'Wgt:{weight_q.extra_repr()}'
SCREAMING_SNAKE_CASE_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(__a ) <= line_width:
logger.info(__a )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def _A (__a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for name, mod in model.named_modules():
if isinstance(__a , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def _A (__a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = getattr(__a , __a , __a )
if quantizer_mod is not None:
assert hasattr(__a , __a )
setattr(__a , __a , __a )
else:
logger.warning(f'{name} has no {quantizer}' )
def _A (__a , __a , __a="both" , **__a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'Warning: changing {which} quantizers of {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(__a , __a , '''_input_quantizer''' , __a , __a )
if which in ["weight", "both"]:
set_quantizer(__a , __a , '''_weight_quantizer''' , __a , __a )
logger.info(__a )
def _A (__a , __a , **__a ) -> List[str]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(__a , '''_input_quantizer''' ) or hasattr(__a , '''_weight_quantizer''' ):
for n in names:
if re.search(__a , __a ):
set_quantizers(__a , __a , **__a )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(__a , __a ):
SCREAMING_SNAKE_CASE_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(__a , __a , __a )
logger.info(__a )
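# A minimal standalone sketch of the per-tensor QKV amax fusion implemented above
# (hypothetical buffers; assumes fused attention kernels expect one shared scale):
import torch

q_amax, k_amax, v_amax = torch.tensor(1.2), torch.tensor(0.8), torch.tensor(1.5)
fused = max(q_amax.item(), k_amax.item(), v_amax.item())  # widest range wins
for buf in (q_amax, k_amax, v_amax):
    buf.fill_(fused)  # all three quantizers now share the same calibration range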
| 91 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
@staticmethod
def UpperCamelCase_ ( *lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ):
'''simple docstring'''
pass
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : str = hashlib.md5(image.tobytes() )
return _UpperCamelCase.hexdigest()[:1_0]
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : int = np.array(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = npimg.shape
return {"hash": hashimage(UpperCAmelCase_ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
lowercase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowercase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Any = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
_UpperCamelCase : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
_UpperCamelCase : Tuple = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = 'facebook/sam-vit-huge'
_UpperCamelCase : Dict = pipeline('mask-generation' ,model=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
_UpperCamelCase : int = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 354 |
'''simple docstring'''
from torch import nn
def A__ ( UpperCAmelCase_ ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' )
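# Standalone usage sketch of the mapping above (the snippet's parameter names are
# mangled, so this re-states the intent rather than calling it directly):
import torch

_ACTS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}
act = _ACTS["gelu"]()
_ = act(torch.zeros(2, 3))  # applies GELU elementwise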
| 236 | 0 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 1_000 ) -> int:
return sum(e for e in range(3 , _SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
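# An O(1) alternative to the sum above via inclusion-exclusion (standard identity,
# shown as a sketch): the multiples of k below n sum to k * m * (m + 1) // 2 with
# m = (n - 1) // k, and multiples of 15 are counted twice, so subtract them once.
def _closed_form(n: int = 1_000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)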
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 347 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A : int = False
@skip_mps
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = StableDiffusionAttendAndExcitePipeline
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[str] = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
__lowerCamelCase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a_ ( cls : Dict ) -> int:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def a_ ( cls : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
A__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
A__ = CLIPTextModel(__lowerCAmelCase )
A__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("""mps""" ):
A__ = torch.manual_seed(__lowerCAmelCase )
else:
A__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
A__ = A__ = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = """cpu"""
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
A__ = self.get_dummy_inputs(__lowerCAmelCase )
A__ = pipe(**__lowerCAmelCase ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A__ = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1e-3 )
def a_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a_ ( self : List[str] ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a_ ( self : Dict ) -> str:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5e-4 )
def a_ ( self : str ) -> Any:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A (unittest.TestCase ):
'''simple docstring'''
@classmethod
def a_ ( cls : List[str] ) -> str:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def a_ ( cls : int ) -> Union[str, Any]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def a_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
A__ = torch.manual_seed(51 )
A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
A__ = """a painting of an elephant with glasses"""
A__ = [5, 7]
A__ = pipe(
prompt=__lowerCAmelCase , token_indices=__lowerCAmelCase , guidance_scale=7.5 , generator=__lowerCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 276 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( __a :str ) -> Optional[int]:
"""simple docstring"""
A__ = {}
A__ = job["""started_at"""]
A__ = job["""completed_at"""]
A__ = date_parser.parse(__a )
A__ = date_parser.parse(__a )
A__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A__ = start
A__ = end
A__ = duration_in_min
return job_info
def __lowerCamelCase ( __a :Optional[Any] , __a :List[str]=None ) -> List[Any]:
"""simple docstring"""
A__ = None
if token is not None:
A__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'Bearer {token}'}
A__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
A__ = requests.get(__a , headers=__a ).json()
A__ = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
A__ = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(__a ):
A__ = requests.get(url + F'&page={i + 2}' , headers=__a ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
A : Dict = parser.parse_args()
A : List[Any] = get_job_time(args.workflow_run_id)
A : int = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 276 | 1 |
import argparse
import os
import re
_snake_case = """src/transformers"""
# Pattern that looks at the indentation in a line.
_snake_case = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_snake_case = re.compile(R'''^\s*\"([^\"]+)\":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_snake_case = re.compile(R'''^\s*_import_structure\[\"([^\"]+)\"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_snake_case = re.compile(R'''^\s*\"([^\"]+)\",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_snake_case = re.compile(R'''\[([^\]]+)\]''')
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = _re_indent.search(SCREAMING_SNAKE_CASE_ )
return "" if search is None else search.groups()[0]
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE_ ):
index += 1
lowerCamelCase : Union[str, Any] = ["\n".join(lines[:index] )]
else:
lowerCamelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase : int = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE_ ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
if index < len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCamelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCamelCase : Any = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE_ ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE_ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE_ ):
return key(SCREAMING_SNAKE_CASE_ ).lower().replace("_" , "" )
return _inner
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE_ ):
return x
if key is None:
lowerCamelCase : Any = noop
# Constants are all uppercase, they go first.
lowerCamelCase : Optional[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase : Tuple = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ )[0].isupper() and not key(SCREAMING_SNAKE_CASE_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase : Optional[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE_ )[0].isupper()]
lowerCamelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE_ )
return sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ )
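# Standalone sketch of the three-bucket ordering above (hypothetical helper; the
# snippet's own functions are name-mangled and shadow one another):
def _demo_sort(objects):
    key = lambda s: s.lower().replace("_", "")
    consts = sorted([o for o in objects if o.isupper()], key=key)
    classes = sorted([o for o in objects if o[0].isupper() and not o.isupper()], key=key)
    funcs = sorted([o for o in objects if not o[0].isupper()], key=key)
    return consts + classes + funcs

assert _demo_sort(["load_x", "XConfig", "X_MAP"]) == ["X_MAP", "XConfig", "load_x"]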
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCamelCase : str = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) + "]"
lowerCamelCase : List[str] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase : List[str] = 2 if lines[1].strip() == "[" else 1
lowerCamelCase : int = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCamelCase : Optional[int] = sort_objects(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] )
lowerCamelCase : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase : List[Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCamelCase : Union[str, Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase : Optional[Any] = keys[:-1]
lowerCamelCase : Optional[Any] = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] )
return "\n".join(SCREAMING_SNAKE_CASE_ )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase : List[str] = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE_ )
return import_statement
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" ) as f:
lowerCamelCase : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE_ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase : Any = main_blocks[block_idx]
lowerCamelCase : List[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCamelCase : Any = 0
while line_idx < len(SCREAMING_SNAKE_CASE_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase : str = len(SCREAMING_SNAKE_CASE_ )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase : Optional[int] = "\n".join(block_lines[line_idx:-1] )
lowerCamelCase : Union[str, Any] = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCamelCase : Optional[int] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE_ , indent_level=SCREAMING_SNAKE_CASE_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase : str = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase : Any = [(pattern.search(SCREAMING_SNAKE_CASE_ ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase : Any = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE_ ) if key is not None]
lowerCamelCase : Optional[int] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] )]
# We reorder the blocks, leaving empty lines/comments where they were and sorting the rest.
lowerCamelCase : Optional[int] = 0
lowerCamelCase : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCamelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE_ )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase : str = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE_ ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE_ ) )
def lowercase_( SCREAMING_SNAKE_CASE_=True ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
lowerCamelCase : List[str] = sort_imports(os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE_ )
if result:
lowerCamelCase : Any = [os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE_ )} files, run `make style`.""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 283 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a : Any = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : str = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
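# Hedged note: the _LazyModule below defers the heavy torch/tokenizers imports
# declared above until an attribute such as BloomModel is first accessed, which
# keeps the top-level package import cheap.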
__a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : Dict = "deberta-v2"
def __init__(self : Dict , snake_case__ : List[str]=12_81_00 , snake_case__ : Optional[int]=15_36 , snake_case__ : Union[str, Any]=24 , snake_case__ : Union[str, Any]=24 , snake_case__ : str=61_44 , snake_case__ : Optional[Any]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Dict=5_12 , snake_case__ : Dict=0 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=1e-7 , snake_case__ : Tuple=False , snake_case__ : List[str]=-1 , snake_case__ : Dict=0 , snake_case__ : int=True , snake_case__ : Optional[int]=None , snake_case__ : Optional[Any]=0 , snake_case__ : int="gelu" , **snake_case__ : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : Optional[Any] = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : Any = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : List[Any] = type_vocab_size
snake_case : List[str] = initializer_range
snake_case : List[str] = relative_attention
snake_case : str = max_relative_positions
snake_case : Union[str, Any] = pad_token_id
snake_case : int = position_biased_input
# Backwards compatibility
if type(snake_case__ ) == str:
snake_case : str = [x.strip() for x in pos_att_type.lower().split("|" )]
snake_case : int = pos_att_type
snake_case : Tuple = vocab_size
snake_case : Union[str, Any] = layer_norm_eps
snake_case : List[Any] = kwargs.get("pooler_hidden_size" , snake_case__ )
snake_case : Optional[Any] = pooler_dropout
snake_case : Optional[Any] = pooler_hidden_act
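# Worked example of the backwards-compatibility parsing above: a string value such
# as pos_att_type="c2p|p2c" is split on "|" into ["c2p", "p2c"] before being stored.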
class UpperCAmelCase ( A_ ):
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Dict = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
return 12
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 40 , snake_case__ : int = 40 , snake_case__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case : Any = super().generate_dummy_inputs(preprocessor=snake_case__ , framework=snake_case__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 357 |
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( __lowerCamelCase : str = "AAPL" ):
snake_case : List[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
snake_case : Tuple = BeautifulSoup(requests.get(__lowerCamelCase ).text , "html.parser" )
snake_case : Dict = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
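# A hedged defensive variant (assumes the same page markup, which Yahoo may change;
# _safe_stock_price is a hypothetical helper, not part of the snippet above):
def _safe_stock_price(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    node = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = node.find("span") if node else None
    return span.text if span else None  # None instead of AttributeError on layout changes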
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 10 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__SCREAMING_SNAKE_CASE : Dict = data_utils.TransfoXLTokenizer
__SCREAMING_SNAKE_CASE : List[str] = data_utils.TransfoXLCorpus
__SCREAMING_SNAKE_CASE : str = data_utils
__SCREAMING_SNAKE_CASE : Union[str, Any] = data_utils
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_UpperCAmelCase , "rb" ) as fp:
_UpperCAmelCase : Optional[Any] = pickle.load(_UpperCAmelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCAmelCase : List[str] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
_UpperCAmelCase : Any = corpus.vocab.__dict__
torch.save(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : str = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , _UpperCAmelCase )
_UpperCAmelCase : int = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCAmelCase : Tuple = os.path.abspath(_UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = os.path.abspath(_UpperCAmelCase )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCAmelCase : Tuple = TransfoXLConfig()
else:
_UpperCAmelCase : Union[str, Any] = TransfoXLConfig.from_json_file(_UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase : Any = TransfoXLLMHeadModel(_UpperCAmelCase )
_UpperCAmelCase : Tuple = load_tf_weights_in_transfo_xl(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
_UpperCAmelCase : Any = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : List[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCAmelCase )}""" )
torch.save(model.state_dict() , _UpperCAmelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCAmelCase )}""" )
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 31 |
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = arr.split("," )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = [int(self.array[0] )] * len(self.array )
UpperCamelCase_ = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
UpperCamelCase_ = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
UpperCamelCase_ = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
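# A minimal Kadane-style sketch of the recurrence above (assumption: the class
# computes the maximum contiguous subarray sum of comma-separated integers):
def _max_subarray(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)  # extend the current run or restart at x
        best = max(best, cur)
    return best

assert _max_subarray([1, -2, 3, 4]) == 7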
if __name__ == "__main__":
UpperCAmelCase : Tuple =input("""please input some numbers:""")
UpperCAmelCase : Optional[int] =SubArray(whole_array)
UpperCAmelCase : List[Any] =array.solve_sub_array()
print(("""the results is:""", re))
| 128 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """CLIPImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
UpperCamelCase = kwargs.pop('feature_extractor' )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
UpperCamelCase = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def A ( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : Any ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : Tuple ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
return self.image_processor_class
@property
def A ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , )
return self.image_processor
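# Hedged usage sketch (assumes a standard CLIP checkpoint on the Hub; the class
# name above is mangled, so the canonical CLIPProcessor is shown instead):
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")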
| 249 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
UpperCamelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(A__ ):
os.makedirs(A__ )
UpperCamelCase = model.state_dict()
def to_tf_var_name(A__ ):
for patt, repl in iter(A__ ):
UpperCamelCase = name.replace(A__ , A__ )
return F"""bert/{name}"""
def create_tf_var(A__ , A__ , A__ ):
UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCamelCase = tf.get_variable(dtype=A__ , shape=tensor.shape , name=A__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase = to_tf_var_name(A__ )
UpperCamelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCamelCase = torch_tensor.T
UpperCamelCase = create_tf_var(tensor=A__ , name=A__ , session=A__ )
tf.keras.backend.set_value(A__ , A__ )
UpperCamelCase = session.run(A__ )
print(F"""Successfully created {tf_name}: {np.allclose(A__ , A__ )}""" )
UpperCamelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(A__ , os.path.join(A__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
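# Worked example of the renaming above (illustrative): the PyTorch key
# "encoder.layer.0.attention.self.query.weight" passes through the substitutions as
# layer. -> layer_, then . -> /, then weight -> kernel, yielding
# "bert/encoder/layer_0/attention/self/query/kernel"; because it matches
# tensors_to_transpose, the tensor itself is transposed first.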
def __lowerCamelCase ( A__=None ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=A__ , required=A__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=A__ , default=A__ , required=A__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=A__ , required=A__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=A__ , required=A__ , help='Directory in which to save tensorflow model' )
UpperCamelCase = parser.parse_args(A__ )
UpperCamelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=A__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 249 | 1 |
import functools
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len(_UpperCAmelCase )
__a = len(_UpperCAmelCase )
@functools.cache
def min_distance(_UpperCAmelCase , _UpperCAmelCase ) -> int:
# if the first word is exhausted, delete whatever remains of the second word
if indexa >= len_worda:
return len_worda - indexa
# if the second word is exhausted, delete whatever remains of the first word
if indexa >= len_worda:
return len_worda - indexa
__a = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _UpperCAmelCase ) , 1 + min_distance(_UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
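# Standalone memoised sketch of the same recurrence (hypothetical helper; the
# snippet's mangled parameter names prevent calling it directly):
import functools

@functools.cache
def _edit_distance(a: str, b: str) -> int:
    if not a:
        return len(b)  # insert/delete the rest of b
    if not b:
        return len(a)
    diff = int(a[0] != b[0])  # 0 if the first letters match, else 1 substitution
    return min(
        1 + _edit_distance(a[1:], b),
        1 + _edit_distance(a, b[1:]),
        diff + _edit_distance(a[1:], b[1:]),
    )

assert _edit_distance("kitten", "sitting") == 3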
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , ):
"""simple docstring"""
lowerCamelCase : str = parent
lowerCamelCase : Union[str, Any] = 13
lowerCamelCase : Optional[Any] = 7
lowerCamelCase : List[str] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = 99
lowerCamelCase : Tuple = 0
lowerCamelCase : Any = 32
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = 4
lowerCamelCase : List[str] = 0.1
lowerCamelCase : int = 0.1
lowerCamelCase : int = 512
lowerCamelCase : List[Any] = 16
lowerCamelCase : Any = 2
lowerCamelCase : Any = 0.02
lowerCamelCase : List[str] = 3
lowerCamelCase : Tuple = 4
lowerCamelCase : int = "last"
lowerCamelCase : int = True
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 0
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase : Tuple = None
if self.use_input_lengths:
lowerCamelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : str = None
if self.use_token_type_ids:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : Dict = None
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A )
lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : Dict = model(__A )
lowerCamelCase : Any = [input_ids, input_mask]
lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A )
lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A )
lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A )
lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A )
lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Any = self.num_choices
lowerCamelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=__A )
lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Optional[Any] = config_and_inputs
lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : str = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : Dict = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__A : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : List[str] = False
def _snake_case ( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFFlaubertModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowerCamelCase : str = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCamelCase : Dict = model(__A )[0]
lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
lowerCamelCase : Tuple = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 283 | 0 |
def lowerCAmelCase__ ( ) ->list[list[int]]:
'''simple docstring'''
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
lowerCamelCase__ = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCAmelCase__ ( a__ ) ->None:
'''simple docstring'''
assert all(row == sorted(a__ , reverse=a__ ) for row in grid )
assert all(list(a__ ) == sorted(a__ , reverse=a__ ) for col in zip(*a__ ) )
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = len(a__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCamelCase = (left + right) // 2
_UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCamelCase = mid + 1
else:
_UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(a__ )
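# Standalone sketch of the boundary search above (hypothetical helper): in a
# non-increasing row, locate the index of the first negative value.
def _first_negative(row):
    lo, hi = 0, len(row)
    while lo < hi:
        mid = (lo + hi) // 2
        if row[mid] < 0:
            hi = mid
        else:
            lo = mid + 1
    return lo

assert _first_negative([4, 3, 2, -1]) == 3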
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = len(grid[0] )
for i in range(len(a__ ) ):
_UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(a__ ) * len(grid[0] )) - total
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 0
for row in grid:
for i, number in enumerate(a__ ):
if number < 0:
total += len(a__ ) - i
break
return total
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 63 | import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCamelCase__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''deta'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ):
"""simple docstring"""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
    def to_dict(self) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
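# A minimal usage sketch (hand-written, not part of the original file): the
# defaults above mirror the two-stage DETA detector, and to_dict() keeps the
# nested backbone config serializable.
#
#     config = DetaConfig()
#     assert config.hidden_size == config.d_model == 256
#     serialized = config.to_dict()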
| 63 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        # is_decoder=False is an assumption; the obfuscated source lost the literal.
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def A ( self : Union[str, Any] , lowercase : List[str] , lowercase : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = ConvNextModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = ConvNextForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Union[str, Any] , lowercase : str , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase = ConvNextBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase = None
UpperCAmelCase = ConvNextBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( __a , __a , unittest.TestCase ):
__a : Tuple = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__a : int = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__a : str = True
__a : Optional[Any] = False
__a : Dict = False
__a : Any = False
__a : Any = False
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = ConvNextModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : List[Any] ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__UpperCamelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def A ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowercase : str , lowercase : Optional[int] , lowercase : Optional[Any] ):
UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def A ( self : str ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = ConvNextModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def snake_case_ ():
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def A ( self : Any ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(__UpperCamelCase )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@require_torch
class _a ( unittest.TestCase , __a ):
__a : Optional[int] = (ConvNextBackbone,) if is_torch_available() else ()
__a : List[str] = ConvNextConfig
__a : int = False
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = ConvNextModelTester(self )
| 34 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # Every prime > 3 has the form 6k +/- 1, so testing divisors 5, 7, 11, 13, ...
    # up to sqrt(number) suffices.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    '''simple docstring'''
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    '''simple docstring'''
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    '''simple docstring'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    '''simple docstring'''
    return sum(compute_truncated_primes(11))
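# Worked example (hand-checked): 3797 is truncatable, since
# list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3] and every entry
# is prime, so compute_truncated_primes eventually collects it.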
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
| 260 | 0 |
def solution() -> str:
    total = 0
    for i in range(1, 10_01):
        total += i**i
    return str(total)[-10:]
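# Quick hand check on a tiny bound: 1**1 + 2**2 + 3**3 + 4**4 = 1 + 4 + 27 + 256 = 288,
# so the same loop capped at range(1, 5) would return "288".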
if __name__ == "__main__":
print(solution())
| 352 |
import torch
from diffusers import StableDiffusionPipeline
model_id = """path-to-your-trained-model"""
# torch.float16 is assumed here (the obfuscated source only shows a mangled dtype);
# half precision is the usual choice for CUDA inference with this pipeline.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
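# Optional, hedged extension (the `generator` name is ours): seeding a torch
# generator makes the sampled image reproducible across runs.
generator = torch.Generator("""cuda""").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]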
| 127 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F"""Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.""")

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(F"""Launching a training on {num_processes} TPU cores.""")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(F"""Launching training on {num_processes} GPUs.""")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
            function(*args)
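# A minimal usage sketch (training_loop is a user-defined function, not part
# of this module); the launcher expects the whole training setup to live
# inside the launched function:
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build Accelerator, model and dataloaders here
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)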
def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") | 135 | """simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
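# Worked example (values checked by hand): pooling a 4x4 matrix with a 2x2
# window and stride 2 yields a 2x2 result.
#
#     m = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#     maxpooling(m, size=2, stride=2)  -> [[ 6.,  8.], [14., 16.]]
#     avgpooling(m, size=2, stride=2)  -> [[ 3.,  5.], [11., 13.]]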
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 135 | 1 |
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """simple docstring"""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
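# Worked example (arithmetic checked by hand): with profit=[10, 9, 8],
# weight=[3, 4, 5] and max_weight=5, the greedy pass takes all of item 0
# (ratio 10/3, gain 10), then 2 of the 4 kg of item 1 (gain 2/4 * 9 = 4.5),
# so calc_profit([10, 9, 8], [3, 4, 5], 5) returns 14.5.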
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 198 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1_024,
    "1B5": 2_048,
    "3B": 2_560,
    "7B": 4_096,
    "14B": 5_120,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn", R"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
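# Worked example of the renaming above (traced by hand): the raw RWKV key
# "blocks.0.att.time_mix_k" becomes "rwkv.blocks.0.attention.time_mix_key".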
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    """simple docstring"""
    # 1. Build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 198 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        '''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
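# A minimal usage sketch (the `d_graph` name is ours): edges chain because
# add_edge returns self; a directed graph of 0 -> 1 -> 2 ends up as
# {0: [1], 1: [2], 2: []}.
#
#     d_graph = GraphAdjacencyList[int]()
#     d_graph.add_edge(0, 1).add_edge(1, 2)
#     print(d_graph)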
| 85 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    '''simple docstring'''
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f'part_id = {partition_id}').drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None, ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        '''simple docstring'''
        yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
    def n_shards(self) -> int:
        '''simple docstring'''
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df, cache_dir=None, working_dir=None, **kwargs, ):
        '''simple docstring'''
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **kwargs, )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a__ )
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
snake_case_ = self.df.count()
snake_case_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case_ = (
self.df.limit(a__ )
.repartition(1 )
.mapInArrow(a__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
snake_case_ = self.df.repartition(a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
snake_case_ = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case_ = self.config.features
snake_case_ = self._writer_batch_size
snake_case_ = self._fs.storage_options
def write_arrow(a__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case_ = pyspark.TaskContext().taskAttemptId()
snake_case_ = next(a__ , a__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 85 | 1 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """simple docstring"""

    def brightness(c: int) -> float:
        return 1_28 + level + (c - 1_28)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
    return img.point(brightness)
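# The lookup simplifies algebraically: 128 + level + (c - 128) == c + level,
# so level=100 shifts every channel value up by 100; for 8-bit images Pillow
# keeps the result within the valid 0-255 range.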
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""") | 361 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir = None) -> None:
        '''simple docstring'''
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_UpperCamelCase = os.path.abspath(__a)
return os.path.join(self.extract_dir , hash_url_to_filename(__a))
def UpperCAmelCase ( self , __a , __a) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(__a) and not (os.path.isdir(__a) and os.listdir(__a))
)
def UpperCAmelCase ( self , __a , __a = False) -> str:
'''simple docstring'''
_UpperCamelCase = self.extractor.infer_extractor_format(__a)
if not extractor_format:
return input_path
_UpperCamelCase = self._get_output_path(__a)
if self._do_extract(__a , __a):
self.extractor.extract(__a , __a , __a)
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def UpperCAmelCase ( cls , __a , **__a) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
@staticmethod
    def read_magic_number(path, magic_number_length):
        '''simple docstring'''
        with open(path, '''rb''') as f:
            return f.read(magic_number_length)
@classmethod
    def is_extractable(cls, path, magic_number = b"") -> bool:
        '''simple docstring'''
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(__a)
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[str]:
'''simple docstring'''
def resolved(__a) -> str:
return os.path.realpath(os.path.abspath(__a))
def badpath(__a , __a) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__a , __a)).startswith(__a)
def badlink(__a , __a) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCamelCase = resolved(os.path.join(__a , os.path.dirname(info.name)))
return badpath(info.linkname , base=__a)
_UpperCamelCase = resolved(__a)
for finfo in members:
if badpath(finfo.name , __a):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''')
elif finfo.issym() and badlink(__a , __a):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
elif finfo.islnk() and badlink(__a , __a):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
else:
yield finfo
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
os.makedirs(__a , exist_ok=__a)
_UpperCamelCase = tarfile.open(__a)
tar_file.extractall(__a , members=TarExtractor.safemembers(__a , __a))
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x1F\x8B']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
with gzip.open(__a , '''rb''') as gzip_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]
@classmethod
def UpperCAmelCase ( cls , __a , __a = b"") -> bool:
'''simple docstring'''
if super().is_extractable(__a , magic_number=__a):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__a , '''rb''') as fp:
_UpperCamelCase = _EndRecData(__a)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCamelCase = fp.read(__a) # CD is where we expect it to be
if len(__a) == sizeCentralDir:
_UpperCamelCase = struct.unpack(__a , __a) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
os.makedirs(__a , exist_ok=__a)
with zipfile.ZipFile(__a , '''r''') as zip_file:
zip_file.extractall(__a)
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
with lzma.open(__a) as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''')
import rarfile
os.makedirs(__a , exist_ok=__a)
_UpperCamelCase = rarfile.RarFile(__a)
rf.extractall(__a)
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''')
import zstandard as zstd
_UpperCamelCase = zstd.ZstdDecompressor()
with open(__a , '''rb''') as ifh, open(__a , '''wb''') as ofh:
dctx.copy_stream(__a , __a)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x42\x5A\x68']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
        with bz2.open(__a , '''rb''') as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''')
        import py7zr
os.makedirs(__a , exist_ok=__a)
        with py7zr.SevenZipFile(__a , '''r''') as archive:
archive.extractall(__a)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x04\x22\x4D\x18']
@staticmethod
def UpperCAmelCase ( __a , __a) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''')
        import lz4.frame

        with lz4.frame.open(__a , '''rb''') as compressed_file:
with open(__a , '''wb''') as extracted_file:
shutil.copyfileobj(__a , __a)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
@classmethod
def UpperCAmelCase ( cls) -> Any:
'''simple docstring'''
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[str]:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__a , magic_number_length=__a)
except OSError:
return b""
@classmethod
def UpperCAmelCase ( cls , __a , __a = False) -> bool:
'''simple docstring'''
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=__a , )
_UpperCamelCase = cls.infer_extractor_format(__a)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase ( cls , __a) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_UpperCamelCase = cls._get_magic_number_max_length()
_UpperCamelCase = cls._read_magic_number(__a , __a)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__a , magic_number=__a):
return extractor_format
@classmethod
def UpperCAmelCase ( cls , __a , __a , __a = None , __a = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(__a) , exist_ok=__a)
# Prevent parallel extractions
_UpperCamelCase = str(Path(__a).with_suffix('''.lock'''))
with FileLock(__a):
shutil.rmtree(__a , ignore_errors=__a)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__a , __a): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=__a , )
_UpperCamelCase = extractor if extractor != '''deprecated''' else extractor_format
else:
_UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(__a , __a)
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=__a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__a):
return extractor.extract(__a , __a)
| 100 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit_mae"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , snake_case__=512 , snake_case__=8 , snake_case__=2_048 , snake_case__=0.75 , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : List[str] = qkv_bias
lowerCAmelCase : Any = decoder_num_attention_heads
lowerCAmelCase : Any = decoder_hidden_size
lowerCAmelCase : Optional[int] = decoder_num_hidden_layers
lowerCAmelCase : Union[str, Any] = decoder_intermediate_size
lowerCAmelCase : Optional[Any] = mask_ratio
lowerCAmelCase : List[str] = norm_pix_loss
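# --- Illustrative usage (not part of the original snippet) ---
# A sketch of instantiating the config above and overriding a default;
# assumes the class is importable from this module.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.9, norm_pix_loss=True)
    print(config.hidden_size)  # 768, the default
    print(config.mask_ratio)   # 0.9, the override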
| 108 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
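# --- Illustrative sketch (not part of the original snippet) ---
# The decoder-past test above verifies that decoding with a KV cache matches
# recomputing from scratch. A minimal version of that pattern for any HF
# causal LM; the helper name and inputs are placeholders, and the attention
# mask is omitted for brevity.
def _kv_cache_demo(model, input_ids, next_token_ids):
    import torch

    with torch.no_grad():
        outputs = model(input_ids, use_cache=True)
        past_key_values = outputs.past_key_values  # cached keys/values per layer
        # Feed only the new tokens; the cache stands in for the full prefix.
        incremental = model(next_token_ids, past_key_values=past_key_values, use_cache=True)
        # Recompute from scratch over the concatenated sequence for comparison.
        full = model(torch.cat([input_ids, next_token_ids], dim=-1))
    return incremental.logits, full.logits[:, -next_token_ids.shape[1]:]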
| 236 | 0 |
def min_path_sum(grid: list) -> int:
    """Return the minimal cost of a path from the top-left to the bottom-right
    cell, moving only right or down. Mutates `grid` in place."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
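# --- Worked example (not part of the original snippet) ---
# For the classic 3x3 grid below, the cheapest right/down path is
# 1 -> 3 -> 1 -> 1 -> 1, total 7. Pass a copy, since min_path_sum
# mutates its input.
if __name__ == "__main__":
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum([row[:] for row in example_grid]))  # 7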
| 327 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a low-rank adapter; used in the fine-tuning test below."""

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
    # We keep the constants inside the init function and the model loading inside the setUp function.

    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b3 to test our module.
_lowercase : Union[str, Any] = '''bigscience/bloom-1b7'''
# Constant values
_lowercase : str = 2.109_6595_5269_2574
_lowercase : Any = '''Hello my name is'''
_lowercase : Any = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
_lowercase : Union[str, Any] = 10
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(self.model_name)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_abit.config
self.assertTrue(hasattr(a , 'quantization_config'))
SCREAMING_SNAKE_CASE = config.to_dict()
SCREAMING_SNAKE_CASE = config.to_diff_dict()
SCREAMING_SNAKE_CASE = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
SCREAMING_SNAKE_CASE = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
with self.assertRaises(a), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , load_in_abit=a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self) -> int:
with self.assertRaises(a):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(a):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_fpaa.to(torch.floataa)
SCREAMING_SNAKE_CASE = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.to('cpu')
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=a , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> Tuple:
SCREAMING_SNAKE_CASE = 't5-small'
SCREAMING_SNAKE_CASE = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(cls.model_name)
SCREAMING_SNAKE_CASE = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE = None
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
SCREAMING_SNAKE_CASE = modules
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
super().setUp()
# model_name
SCREAMING_SNAKE_CASE = 'bigscience/bloom-560m'
SCREAMING_SNAKE_CASE = 't5-small'
# Different types of model
SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Sequence classification model
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a , device_map='auto')
# CausalLM model
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Seq2seq model
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
SCREAMING_SNAKE_CASE = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
SCREAMING_SNAKE_CASE = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
SCREAMING_SNAKE_CASE = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a)):
SCREAMING_SNAKE_CASE = LoRALayer(module.q_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.k_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
SCREAMING_SNAKE_CASE = self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE = model.forward(**a)
out.logits.norm().backward()
for module in model.modules():
if isinstance(a , a):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(a , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class _snake_case ( A__ ):
_lowercase : str = '''gpt2-xl'''
_lowercase : Union[str, Any] = 3.3191_8548_5415_2187
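# --- Illustrative sketch (not part of the original snippet) ---
# The tests above exercise quantized loading end to end. The user-facing
# pattern they cover, reduced to its core; the checkpoint id is a placeholder
# and any causal LM would do.
def _load_quantized_demo():
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m",       # placeholder checkpoint
        quantization_config=quantization_config,
        device_map="auto",             # let accelerate place the quantized weights
    )
    return model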
| 327 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class A__ ( unittest.TestCase ):
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str =tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_a : Optional[Any] =self.transformer_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def __UpperCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] ="""src/transformers"""
shutil.rmtree(self.transformer_dir )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> Tuple:
'''simple docstring'''
_a : List[Any] =comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
_a : Any =comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
_a : Optional[Any] =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
_a : List[Any] =black.format_str(SCREAMING_SNAKE_CASE , mode=SCREAMING_SNAKE_CASE )
_a : Optional[int] =os.path.join(self.transformer_dir , """new_code.py""" )
with open(SCREAMING_SNAKE_CASE , """w""" , newline="""\n""" ) as f:
f.write(SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , """r""" ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
_a : Dict =check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str ) -> str:
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
_a : int ="""TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("""Bert""" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , SCREAMING_SNAKE_CASE , overwrite_result=re.sub("""Bert""" , """TestModel""" , SCREAMING_SNAKE_CASE ) , )
def __UpperCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
_a : List[Any] =check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_a : Tuple =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_a : Union[str, Any] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_a : Optional[int] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_a , _a : Union[str, Any] =check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
self.assertFalse(SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a , _a : List[str] =check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(SCREAMING_SNAKE_CASE )
_a : Dict =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_a : Tuple =(
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_a : Optional[Any] =(
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_a , _a : Union[str, Any] =check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
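# --- Illustrative example (not part of the original snippet) ---
# What the checker above enforces: a block annotated with a "Copied from"
# comment must stay identical to its source (after the optional rename
# substitution). A typical marker, with hypothetical names, looks like:
#
#   # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->MyModel
#   class MyModelSelfOutput(nn.Module):
#       ...
#
# check_copies.is_copy_consistent re-renders the source with black, diffs it
# against the annotated block, and can rewrite the block when overwrite=True.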
| 276 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the image transform imported at the top of the module
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
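# --- Worked example (not part of the original snippet) ---
# normalize_box rescales pixel boxes to the 0-1000 grid that LayoutLM-style
# models expect. For a 200x100 image, a box covering the left half maps to:
if __name__ == "__main__":
    print(normalize_box([0, 0, 100, 100], width=200, height=100))  # [0, 0, 500, 1000]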
| 276 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def __lowerCamelCase ( self :Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case__ : List[str] = dict(zip(lowercase_ ,range(len(lowercase_ ) ) ) )
snake_case__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case__ : List[str] = {'''unk_token''': '''<unk>'''}
snake_case__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
def __lowerCamelCase ( self :Dict ,**__lowercase :Optional[int] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**lowercase_ )
def __lowerCamelCase ( self :List[str] ,**__lowercase :str ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase_ )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ):
snake_case__ : List[Any] = '''lower newer'''
snake_case__ : int = '''lower newer'''
return input_text, output_text
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Union[str, Any] = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
snake_case__ : str = '''lower newer'''
snake_case__ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case__ : Tuple = tokenizer.tokenize(lowercase_ ,add_prefix_space=lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) ,lowercase_ )
def __lowerCamelCase ( self :Optional[Any] ):
if not self.test_rust_tokenizer:
return
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
snake_case__ : int = '''lower newer'''
# Testing tokenization
snake_case__ : str = tokenizer.tokenize(lowercase_ ,add_prefix_space=lowercase_ )
snake_case__ : Union[str, Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
# Testing conversion to ids without special tokens
snake_case__ : Dict = tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ ,add_prefix_space=lowercase_ )
snake_case__ : List[Any] = rust_tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
# Testing conversion to ids with special tokens
snake_case__ : Tuple = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
snake_case__ : Any = tokenizer.encode(lowercase_ ,add_prefix_space=lowercase_ )
snake_case__ : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
# Testing the unknown token
snake_case__ : Any = tokens + [rust_tokenizer.unk_token]
snake_case__ : Tuple = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ) ,lowercase_ )
def __lowerCamelCase ( self :Union[str, Any] ,*__lowercase :Optional[Any] ,**__lowercase :Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[Any]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
# Simple input
snake_case__ : str = '''This is a simple input'''
snake_case__ : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : List[str] = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Optional[int] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase_ ,tokenizer_r.encode ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' )
# Simple input
self.assertRaises(lowercase_ ,tokenizer_r.encode_plus ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase_ ,tokenizer_r.batch_encode_plus ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' ,)
# Pair input
self.assertRaises(lowercase_ ,tokenizer_r.encode ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' )
# Pair input
self.assertRaises(lowercase_ ,tokenizer_r.encode_plus ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase_ ,tokenizer_r.batch_encode_plus ,lowercase_ ,max_length=lowercase_ ,padding='''max_length''' ,)
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
snake_case__ : Any = '''This is a simple input'''
snake_case__ : Tuple = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case__ : Optional[int] = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Optional[int] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case__ : str = tokenizer.pad_token_id
snake_case__ : List[Any] = tokenizer(lowercase_ ,padding='''max_length''' ,max_length=3_0 ,return_tensors='''np''' )
snake_case__ : int = tokenizer(lowercase_ ,padding=lowercase_ ,truncate=lowercase_ ,return_tensors='''np''' )
snake_case__ : int = tokenizer(*lowercase_ ,padding='''max_length''' ,max_length=6_0 ,return_tensors='''np''' )
snake_case__ : Optional[Any] = tokenizer(lowercase_ ,padding=lowercase_ ,truncate=lowercase_ ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCamelCase ( self :Dict ):
snake_case__ : Optional[Any] = '''$$$'''
snake_case__ : str = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowercase_ ,add_bos_token=lowercase_ )
snake_case__ : Union[str, Any] = '''This is a simple input'''
snake_case__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : int = tokenizer.bos_token_id
snake_case__ : List[Any] = tokenizer(lowercase_ )
snake_case__ : List[Any] = tokenizer(lowercase_ )
self.assertEqual(out_s.input_ids[0] ,lowercase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case__ : str = tokenizer.decode(out_s.input_ids )
snake_case__ : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowercase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowerCamelCase ( self :Tuple ):
pass
def __lowerCamelCase ( self :int ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
snake_case__ : List[Any] = [self.get_tokenizer(do_lower_case=lowercase_ ,add_bos_token=lowercase_ )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ : Optional[Any] = '''Encode this.'''
snake_case__ : List[str] = '''This one too please.'''
snake_case__ : Optional[Any] = tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
encoded_sequence += tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
snake_case__ : Any = tokenizer.encode_plus(
lowercase_ ,lowercase_ ,add_special_tokens=lowercase_ ,return_special_tokens_mask=lowercase_ ,)
snake_case__ : int = encoded_sequence_dict['''input_ids''']
snake_case__ : str = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) )
snake_case__ : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase_ )
]
snake_case__ : Optional[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase_ ,lowercase_ )
@require_tokenizers
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :List[str] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
snake_case__ : int = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=lowercase_ )
snake_case__ : Optional[Any] = '''A photo of a cat'''
snake_case__ : int = tokenizer.encode(
lowercase_ ,)
self.assertEqual(lowercase_ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained('''./test_opt''' )
snake_case__ : int = tokenizer.encode(
lowercase_ ,)
self.assertEqual(lowercase_ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,use_slow=lowercase_ )
snake_case__ : Union[str, Any] = '''A photo of a cat'''
snake_case__ : int = tokenizer.encode(
lowercase_ ,)
# Same as above
self.assertEqual(lowercase_ ,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=lowercase_ )
snake_case__ : int = '''bos'''
snake_case__ : Dict = tokenizer.get_vocab()['''bos''']
snake_case__ : List[str] = '''A photo of a cat'''
snake_case__ : int = tokenizer.encode(
lowercase_ ,)
# We changed the bos token
self.assertEqual(lowercase_ ,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
snake_case__ : Dict = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
snake_case__ : Optional[int] = tokenizer.encode(
lowercase_ ,)
self.assertEqual(lowercase_ ,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
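# --- Illustrative sketch (not part of the original snippet) ---
# The toy vocab/merges built in setUp encode the byte-level BPE convention the
# tests rely on: "\u0120" marks a token that begins with a space. The helper
# name below is a placeholder; the expected tokens come from the test above.
def _toy_bpe_demo(tokenizer):
    # With add_prefix_space=True, "lower newer" tokenizes under the toy merges to
    # ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]: "\u0120low" is a learned
    # merge, while " newer" falls back to single characters plus the "er" merge.
    return tokenizer.tokenize("lower newer", add_prefix_space=True)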
| 357 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the length k of the smallest repunit R(k) divisible by `divisor`, or 0 if none exists."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # Work modulo `divisor` so R(k) never has to be built explicitly.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor d coprime to 10 with A(d) > limit (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
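# --- Worked example (not part of the original snippet) ---
# A(7) = 6: the first repunit divisible by 7 is R(6) = 111111 = 7 * 15873,
# and the loop above finds that index purely through modular arithmetic.
if __name__ == "__main__":
    print(least_divisible_repunit(7))  # 6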
| 44 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = tmp_path / "file.csv"
__UpperCAmelCase : Optional[int] = textwrap.dedent(
"""\\n header1,header2\n 1,2\n 10,20\n """ )
with open(__a , """w""" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : str = tmp_path / "malformed_file.csv"
__UpperCAmelCase : List[Any] = textwrap.dedent(
"""\\n header1,header2\n 1,2\n 10,20,\n """ )
with open(__a , """w""" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int ):
__UpperCAmelCase : Tuple = tmp_path / "csv_with_image.csv"
__UpperCAmelCase : str = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(__a , """w""" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = tmp_path / "csv_with_label.csv"
__UpperCAmelCase : Optional[Any] = textwrap.dedent(
"""\\n label\n good\n bad\n good\n """ )
with open(__a , """w""" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCamelCase__ ( __lowerCamelCase : int ):
__UpperCAmelCase : Dict = tmp_path / "csv_with_int_list.csv"
__UpperCAmelCase : str = textwrap.dedent(
"""\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n """ )
with open(__a , """w""" ) as f:
f.write(__a )
return str(__a )
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : List[Any] = Csv()
__UpperCAmelCase : Union[str, Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(__a , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(__a ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase__ ( __lowerCamelCase : str ):
with open(__a , encoding="""utf-8""" ) as f:
__UpperCAmelCase : Optional[Any] = f.read().splitlines()[1]
__UpperCAmelCase : Any = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
__UpperCAmelCase : int = csv._generate_tables([[csv_file_with_image]] )
__UpperCAmelCase : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
__UpperCAmelCase : List[str] = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase__ ( __lowerCamelCase : List[str] ):
with open(__a , encoding="""utf-8""" ) as f:
__UpperCAmelCase : str = f.read().splitlines()[1:]
__UpperCAmelCase : Any = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
__UpperCAmelCase : Optional[int] = csv._generate_tables([[csv_file_with_label]] )
__UpperCAmelCase : Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
__UpperCAmelCase : Optional[int] = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label) for label in labels]
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
    __UpperCAmelCase : List[Any] = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x: [int(i) for i in x.split()]} )
__UpperCAmelCase : Tuple = csv._generate_tables([[csv_file_with_int_list]] )
__UpperCAmelCase : Dict = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
__UpperCAmelCase : Any = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
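# Usage sketch (added): the same `converters` mechanism through the public
# `load_dataset` API; the csv builder forwards `converters` to
# `pandas.read_csv`. The file name "data.csv" is hypothetical.
#
#     from datasets import load_dataset
#     ds = load_dataset(
#         "csv",
#         data_files="data.csv",
#         converters={"int_list": lambda x: [int(i) for i in x.split()]},
#     )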
| 114 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> float:
return 0.0
def UpperCAmelCase ( a_ , a_ ) -> tuple[int | float, int | float]:
"""simple docstring"""
A_ : str = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1] )] )
A_ : int = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def UpperCAmelCase ( a_ , a_ ) -> None:
"""simple docstring"""
A_ : int = 5_1_2
A_ : Union[str, Any] = [1] + [0] * (size - 1)
    A_ : Optional[Any] = [filter_type.process(item ) for item in inputs]
A_ : Any = [0] * (samplerate - size) # zero-padding
outputs += filler
A_ : str = np.abs(np.fft.fft(a_ ) )
    A_ : int = 2_0 * np.log10(a_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
A_ : Optional[Any] = get_bounds(a_ , a_ )
plt.ylim(max([-8_0, bounds[0]] ) , min([8_0, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(a_ )
plt.show()
def UpperCAmelCase ( a_ , a_ ) -> None:
"""simple docstring"""
A_ : str = 5_1_2
A_ : Tuple = [1] + [0] * (size - 1)
    A_ : str = [filter_type.process(item ) for item in inputs]
A_ : Tuple = [0] * (samplerate - size) # zero-padding
outputs += filler
A_ : List[Any] = np.angle(np.fft.fft(a_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(a_ , -2 * pi ) )
plt.show()
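# Minimal usage sketch (added). The plotting helpers above accept any object
# exposing a `process(sample) -> float` method, as declared by the protocol
# class at the top of the file. The one-pole low-pass below is hypothetical,
# and the helper names in the commented calls follow the upstream
# audio_filters module, since the definitions above were renamed.
class SimpleLowPass:
    """One-pole low-pass: y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev

# show_frequency_response(SimpleLowPass(), 48000)  # assumed upstream name
# show_phase_response(SimpleLowPass(), 48000)      # assumed upstream name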
| 164 |
'''simple docstring'''
UpperCamelCase__ : int = {str(digit): digit**5 for digit in range(10)}
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(a_ ) )
if __name__ == "__main__":
print(solution())
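# Worked example (added; not part of the original file): 4150 is one of the
# numbers counted above, since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0.
assert 4**5 + 1**5 + 5**5 + 0**5 == 4150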
| 164 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a_ = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]:
__lowercase : Union[str, Any] = feature_size
__lowercase : Tuple = sampling_rate
__lowercase : Union[str, Any] = padding_value
__lowercase : Tuple = kwargs.pop('''padding_side''' , '''right''' )
__lowercase : int = kwargs.pop('''return_attention_mask''' , UpperCamelCase_ )
super().__init__(**UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(UpperCamelCase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowercase : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
__lowercase : Any = processed_features[self.model_input_names[0]]
__lowercase : Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCamelCase_ ) == 0:
if return_attention_mask:
__lowercase : Tuple = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowercase : Dict = required_input[0]
if isinstance(UpperCamelCase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__lowercase : int = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCamelCase_ ):
__lowercase : Union[str, Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCamelCase_ ):
__lowercase : Any = '''tf'''
elif is_torch_tensor(UpperCamelCase_ ):
__lowercase : List[Any] = '''pt'''
elif isinstance(UpperCamelCase_ , (int, float, list, tuple, np.ndarray) ):
__lowercase : Tuple = '''np'''
else:
raise ValueError(
F"""type of {first_element} unknown: {type(UpperCamelCase_ )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowercase : str = to_numpy(UpperCamelCase_ )
else:
__lowercase : Union[str, Any] = [to_numpy(UpperCamelCase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowercase : Optional[Any] = self._get_padding_strategies(padding=UpperCamelCase_ , max_length=UpperCamelCase_ )
__lowercase : List[Any] = processed_features[self.model_input_names[0]]
__lowercase : int = len(UpperCamelCase_ )
if not all(len(UpperCamelCase_ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
__lowercase : Optional[Any] = []
for i in range(UpperCamelCase_ ):
__lowercase : List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowercase : List[Any] = self._truncate(
UpperCamelCase_ , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , truncation=UpperCamelCase_ , )
truncated_inputs.append(UpperCamelCase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowercase : Dict = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowercase : Any = PaddingStrategy.MAX_LENGTH
__lowercase : int = {}
for i in range(UpperCamelCase_ ):
# padding
__lowercase : Any = self._pad(
truncated_inputs[i] , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowercase : int = []
            if value.dtype is np.dtype(np.float64 ):
                __lowercase : Any = value.astype(np.float32 )
batch_outputs[key].append(UpperCamelCase_ )
return BatchFeature(UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> dict:
__lowercase : Tuple = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowercase : Dict = len(UpperCamelCase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCamelCase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            __lowercase : Any = np.ones(len(UpperCamelCase_ ) , dtype=np.int32 )
if needs_to_be_padded:
__lowercase : Union[str, Any] = max_length - len(UpperCamelCase_ )
if self.padding_side == "right":
if return_attention_mask:
__lowercase : Any = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
__lowercase : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowercase : Any = np.pad(
UpperCamelCase_ , UpperCamelCase_ , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowercase : List[Any] = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
__lowercase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowercase : Dict = np.pad(
UpperCamelCase_ , UpperCamelCase_ , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Dict:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
__lowercase : Optional[Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase : List[Any] = len(UpperCamelCase_ ) > max_length
if needs_to_be_truncated:
__lowercase : Any = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowercase : Optional[Any] = processed_features['''attention_mask'''][:max_length]
return processed_features
def _lowerCamelCase ( self , UpperCamelCase_=False , UpperCamelCase_=None ) -> List[Any]:
# Get padding strategy
if padding is not False:
if padding is True:
__lowercase : Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Dict = PaddingStrategy(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Union[str, Any] = padding
else:
__lowercase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
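# Usage sketch (added). Upstream this class is SequenceFeatureExtractor; the
# subclass, the public method name `pad`, and the values below are assumptions
# that only illustrate the padding path implemented above:
#
#     class ToyExtractor(SequenceFeatureExtractor):
#         model_input_names = ["input_values"]
#
#     extractor = ToyExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = extractor.pad(
#         {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#         padding="longest",
#         return_attention_mask=True,
#     )
#     # batch["input_values"][1] pads to [0.4, 0.0, 0.0];
#     # batch["attention_mask"] is [[1, 1, 1], [1, 0, 0]].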
| 249 |
"""simple docstring"""
import math
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
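# Worked example (added): at 60 degrees, cos(60°) == 0.5 and cos²(60°) == 0.25,
# so an incident beam of intensity 100.0 leaves the analyser at 25.0:
#
#     import math
#     assert abs(100.0 * math.cos(math.radians(60)) ** 2 - 25.0) < 1e-9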
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 249 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self , A , A ) -> Any:
_UpperCAmelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = 2_0
_UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
_UpperCAmelCase : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCAmelCase : Union[str, Any] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCAmelCase : str = jax.nn.softmax(A , axis=-1 )
_UpperCAmelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCAmelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
_UpperCAmelCase : Tuple = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Tuple = 1_0
_UpperCAmelCase : str = 2
# create ramp distribution
_UpperCAmelCase : Union[str, Any] = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCAmelCase : List[str] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : Optional[int] = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCAmelCase : Optional[int] = 5
_UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCAmelCase : Tuple = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
_UpperCAmelCase : Dict = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : List[str] = 1_0
_UpperCAmelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCAmelCase : List[str] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCAmelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
_UpperCAmelCase : Optional[Any] = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCAmelCase : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCAmelCase : Any = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCAmelCase : str = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCAmelCase : int = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCAmelCase : List[Any] = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Optional[Any] = 2_0
_UpperCAmelCase : Optional[Any] = 4
_UpperCAmelCase : int = 0
_UpperCAmelCase : Any = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
# check that min length is applied at length 5
_UpperCAmelCase : List[Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
_UpperCAmelCase : Tuple = 5
_UpperCAmelCase : List[Any] = self._get_uniform_logits(A , A )
_UpperCAmelCase : Dict = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
_UpperCAmelCase : str = 1_5
_UpperCAmelCase : Any = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : List[str] = 2_0
_UpperCAmelCase : str = 4
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
_UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
_UpperCAmelCase : str = 1
_UpperCAmelCase : Dict = self._get_uniform_logits(A , A )
_UpperCAmelCase : List[str] = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCAmelCase : Optional[int] = 3
_UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
_UpperCAmelCase : Optional[Any] = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Any = 2_0
_UpperCAmelCase : str = 4
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : List[str] = 5
_UpperCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCAmelCase : str = ids_tensor((batch_size, 4) , vocab_size=2_0 )
_UpperCAmelCase : List[Any] = 4
_UpperCAmelCase : str = self._get_uniform_logits(A , A )
_UpperCAmelCase : Union[str, Any] = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCAmelCase : str = 3
_UpperCAmelCase : Any = self._get_uniform_logits(A , A )
_UpperCAmelCase : Optional[int] = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = 4
_UpperCAmelCase : Dict = 1_0
_UpperCAmelCase : Union[str, Any] = 1_5
_UpperCAmelCase : Any = 2
_UpperCAmelCase : str = 1
_UpperCAmelCase : Tuple = 1_5
# dummy input_ids and scores
_UpperCAmelCase : str = ids_tensor((batch_size, sequence_length) , A )
_UpperCAmelCase : Any = input_ids.copy()
_UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
_UpperCAmelCase : Union[str, Any] = scores.copy()
# instantiate all dist processors
_UpperCAmelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCAmelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
_UpperCAmelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
_UpperCAmelCase : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
_UpperCAmelCase : int = 1_0
# no processor list
_UpperCAmelCase : int = temp_dist_warp(A , A , cur_len=A )
_UpperCAmelCase : List[str] = top_k_warp(A , A , cur_len=A )
_UpperCAmelCase : Tuple = top_p_warp(A , A , cur_len=A )
_UpperCAmelCase : Union[str, Any] = min_dist_proc(A , A , cur_len=A )
_UpperCAmelCase : Tuple = bos_dist_proc(A , A , cur_len=A )
_UpperCAmelCase : int = eos_dist_proc(A , A , cur_len=A )
# with processor list
_UpperCAmelCase : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCAmelCase : Optional[Any] = processor(A , A , cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : Optional[int] = 1_0
_UpperCAmelCase : Optional[Any] = 1_5
_UpperCAmelCase : Optional[int] = 2
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Union[str, Any] = 1_5
# dummy input_ids and scores
_UpperCAmelCase : Tuple = ids_tensor((batch_size, sequence_length) , A )
_UpperCAmelCase : Tuple = input_ids.copy()
_UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
_UpperCAmelCase : Any = scores.copy()
# instantiate all dist processors
_UpperCAmelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : str = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
_UpperCAmelCase : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
_UpperCAmelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
_UpperCAmelCase : List[str] = 1_0
# no processor list
def run_no_processor_list(A , A , A ):
_UpperCAmelCase : Tuple = temp_dist_warp(A , A , cur_len=A )
_UpperCAmelCase : Tuple = top_k_warp(A , A , cur_len=A )
_UpperCAmelCase : Union[str, Any] = top_p_warp(A , A , cur_len=A )
_UpperCAmelCase : Tuple = min_dist_proc(A , A , cur_len=A )
_UpperCAmelCase : str = bos_dist_proc(A , A , cur_len=A )
_UpperCAmelCase : Optional[int] = eos_dist_proc(A , A , cur_len=A )
return scores
# with processor list
def run_processor_list(A , A , A ):
_UpperCAmelCase : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCAmelCase : Dict = processor(A , A , cur_len=A )
return scores
_UpperCAmelCase : Any = jax.jit(A )
_UpperCAmelCase : List[str] = jax.jit(A )
_UpperCAmelCase : Optional[int] = jitted_run_no_processor_list(A , A , A )
_UpperCAmelCase : int = jitted_run_processor_list(A , A , A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 68 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCAmelCase :str = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
a__ =field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
a__ =field(
default=1_2_8 ,metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} ,)
a__ =field(
default=a ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : str = self.task_name.lower()
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''train'''
a__ ='''dev'''
a__ ='''test'''
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =42
a__ =42
a__ =42
def __init__( self , A , A , A = None , A = Split.train , A = None , ) -> Dict:
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , A , )
_UpperCAmelCase : Dict = args
_UpperCAmelCase : int = glue_processors[args.task_name]()
_UpperCAmelCase : Any = glue_output_modes[args.task_name]
if isinstance(A , A ):
try:
_UpperCAmelCase : int = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
_UpperCAmelCase : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
_UpperCAmelCase : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = label_list[2], label_list[1]
_UpperCAmelCase : Optional[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : List[Any] = cached_features_file + '''.lock'''
with FileLock(A ):
if os.path.exists(A ) and not args.overwrite_cache:
_UpperCAmelCase : str = time.time()
_UpperCAmelCase : Dict = torch.load(A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
_UpperCAmelCase : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_UpperCAmelCase : List[Any] = self.processor.get_test_examples(args.data_dir )
else:
_UpperCAmelCase : Optional[Any] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_UpperCAmelCase : Tuple = examples[:limit_length]
_UpperCAmelCase : str = glue_convert_examples_to_features(
A , A , max_length=args.max_seq_length , label_list=A , output_mode=self.output_mode , )
_UpperCAmelCase : Optional[int] = time.time()
torch.save(self.features , A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self , A ) -> InputFeatures:
return self.features[i]
def __lowerCAmelCase ( self ) -> List[Any]:
return self.label_list
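# Usage sketch (added). Upstream these classes are GlueDataTrainingArguments
# and GlueDataset; the names and paths below are assumptions, since the
# definitions above were renamed:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue/MRPC")
#     train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")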
| 68 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__a ):
_a = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_a = FlaxAutoModel.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def UpperCamelCase__ ( self : int ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__a ):
_a = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_a = FlaxAutoModel.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_a = AutoTokenizer.from_pretrained(__a )
_a = FlaxBertModel.from_pretrained(__a )
_a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__a : Optional[Any] ):
return model(**__a )
eval(**__a ).block_until_ready()
@slow
def UpperCamelCase__ ( self : Dict ):
for model_name in ["roberta-base", "roberta-large"]:
_a = AutoTokenizer.from_pretrained(__a )
_a = FlaxRobertaModel.from_pretrained(__a )
_a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__a : str ):
return model(**__a )
eval(**__a ).block_until_ready()
def UpperCamelCase__ ( self : Any ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = FlaxAutoModel.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : int ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = FlaxAutoModel.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : Dict ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
_a = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : str ):
with self.assertRaisesRegex(__a , "Use `from_pt=True` to load this model" ):
_a = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
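# Usage note (added): with the lazy module above, the torch-backed classes are
# only imported on first attribute access. A hypothetical example:
#
#     from transformers import GPTNeoXForCausalLM
#     model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")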
| 63 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Tuple = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
snake_case : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
UpperCamelCase__ = field(
default=snake_case , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case )} )
UpperCamelCase__ = field(
default=snake_case , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
UpperCamelCase__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase__ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
UpperCamelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
UpperCamelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
UpperCamelCase__ = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
UpperCamelCase__ = field(
default=snake_case , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
UpperCamelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
UpperCamelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
UpperCamelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
UpperCamelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _snake_case ( snake_case ):
UpperCamelCase__ = 'train'
UpperCamelCase__ = 'dev'
class _snake_case ( snake_case ):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
def __init__( self , _a , _a , _a = None , _a = Split.train , _a = False , _a = None , _a = "pt" , ):
__magic_name__ : Optional[Any] = args
__magic_name__ : str = is_language_sensitive
        __magic_name__ : Union[str, Any] = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_a , _a ):
try:
__magic_name__ : int = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
__magic_name__ : List[str] = mode
# Load data features from cache or dataset file
__magic_name__ : Union[str, Any] = "v2" if args.version_2_with_negative else "v1"
__magic_name__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__magic_name__ : Optional[Any] = cached_features_file + ".lock"
with FileLock(_a ):
if os.path.exists(_a ) and not args.overwrite_cache:
__magic_name__ : List[Any] = time.time()
__magic_name__ : Dict = torch.load(_a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__magic_name__ : Dict = self.old_features["features"]
__magic_name__ : Optional[Any] = self.old_features.get("dataset" , _a )
__magic_name__ : str = self.old_features.get("examples" , _a )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
__magic_name__ : Optional[int] = self.processor.get_dev_examples(args.data_dir )
else:
__magic_name__ : Optional[Any] = self.processor.get_train_examples(args.data_dir )
__magic_name__ , __magic_name__ : List[Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_a , )
__magic_name__ : Any = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , _a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , _a ):
# Convert to Tensors and build dataset
__magic_name__ : List[Any] = self.features[i]
__magic_name__ : Union[str, Any] = torch.tensor(feature.input_ids , dtype=torch.long )
__magic_name__ : Dict = torch.tensor(feature.attention_mask , dtype=torch.long )
__magic_name__ : List[str] = torch.tensor(feature.token_type_ids , dtype=torch.long )
__magic_name__ : Tuple = torch.tensor(feature.cls_index , dtype=torch.long )
__magic_name__ : str = torch.tensor(feature.p_mask , dtype=torch.float )
__magic_name__ : Dict = torch.tensor(feature.is_impossible , dtype=torch.float )
__magic_name__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__magic_name__ : Optional[int] = torch.tensor(feature.start_position , dtype=torch.long )
__magic_name__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 41 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case ( snake_case ):
UpperCamelCase__ = ['image_processor', 'tokenizer']
UpperCamelCase__ = 'BridgeTowerImageProcessor'
UpperCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , _a , _a ):
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
__magic_name__ : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
__magic_name__ : List[str] = self.image_processor(
_a , return_tensors=_a , do_normalize=_a , do_center_crop=_a , **_a )
encoding.update(_a )
return encoding
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.tokenizer.model_input_names
__magic_name__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
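# Usage sketch (added). Upstream this class is BridgeTowerProcessor; the
# checkpoint name below is an assumption for illustration:
#
#     from transformers import BridgeTowerProcessor
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(images=image, text="a photo", return_tensors="pt")
#     # `encoding` combines input_ids/attention_mask from the tokenizer with
#     # pixel_values/pixel_mask from the image processor.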
| 41 | 1 |
import argparse
import os
import re
import packaging.version
__UpperCAmelCase = "examples/"
__UpperCAmelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCAmelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCAmelCase = "README.md"
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
with open(UpperCamelCase_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ = replace.replace('''VERSION''', UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ = re_pattern.sub(UpperCamelCase_, UpperCamelCase_ )
with open(UpperCamelCase_, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.write(UpperCamelCase_ )
def A__ ( __lowerCamelCase ):
for folder, directories, fnames in os.walk(UpperCamelCase_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase_, UpperCamelCase_ ), UpperCamelCase_, pattern='''examples''' )
def A__ ( __lowerCamelCase, __lowerCamelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ )
if not patch:
update_version_in_examples(UpperCamelCase_ )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ = '''1. Want to contribute a new model?'''
with open(UpperCamelCase_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''', '''https://huggingface.co/docs/transformers/model_doc''', )
index += 1
with open(UpperCamelCase_, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(UpperCamelCase_ )
def A__ ( ):
with open(REPLACE_FILES['''init'''], '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
SCREAMING_SNAKE_CASE_ = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase_ ).groups()[0]
return packaging.version.parse(UpperCamelCase_ )
def A__ ( __lowerCamelCase=False ):
SCREAMING_SNAKE_CASE_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
SCREAMING_SNAKE_CASE_ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(UpperCamelCase_ ) == 0:
SCREAMING_SNAKE_CASE_ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCamelCase_, patch=UpperCamelCase_ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def A__ ( ):
SCREAMING_SNAKE_CASE_ = get_version()
SCREAMING_SNAKE_CASE_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
SCREAMING_SNAKE_CASE_ = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCamelCase_ ) == 0:
SCREAMING_SNAKE_CASE_ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCamelCase_ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
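# Worked example (added): why the replacement templates above end in "\n". In
# the "init" pattern, the trailing \s*$ also consumes the newline, so the
# replacement string must restore it:
_pat = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
_code = '__version__ = "4.30.0.dev0"\n'
assert _pat.sub('__version__ = "4.30.0"\n', _code) == '__version__ = "4.30.0"\n'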
| 299 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@dataclass
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **__snake_case ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
snake_case = deprecated_arg[3:]
setattr(self , __snake_case , not kwargs.pop(__snake_case ) )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
snake_case = kwargs.pop('''torchscript''' , self.torchscript )
snake_case = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        snake_case = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
super().__init__(**__snake_case )
__magic_name__ = field(default=snake_case__ , metadata={'help': 'Trace the models using torchscript'} )
__magic_name__ = field(default=snake_case__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
__magic_name__ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def a_ ( self ):
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
snake_case = torch.device('''cpu''' )
snake_case = 0
elif is_torch_tpu_available():
snake_case = xm.xla_device()
snake_case = 0
else:
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case = torch.cuda.device_count()
return device, n_gpu
@property
def a_ ( self ):
return is_torch_tpu_available() and self.tpu
@property
def a_ ( self ):
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def a_ ( self ):
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def a_ ( self ):
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def a_ ( self ):
return self.n_gpu > 0
| 127 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SCREAMING_SNAKE_CASE_ : Dict = 5_0_0_0_0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 5_0_0_0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.split(__file__)
SCREAMING_SNAKE_CASE_ : str = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , UpperCAmelCase_ : List[Any] ):
for i in range(UpperCAmelCase_ ):
A__ = dataset[i]
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ):
for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ ):
A__ = dataset[i : i + batch_size]
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ):
with dataset.formatted_as(type=UpperCAmelCase_ ):
for i in range(UpperCAmelCase_ ):
A__ = dataset[i]
@get_duration
def _snake_case ( UpperCAmelCase_ : datasets.Dataset , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
with dataset.formatted_as(type=UpperCAmelCase_ ):
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = dataset[i : i + batch_size]
def _snake_case ( ):
A__ = {"""num examples""": SPEED_TEST_N_EXAMPLES}
A__ = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
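    # The shuffled pass below reuses a subset of the readers: shuffling randomizes
    # row order, which defeats Arrow's sequential record-batch access — the slowdown
    # this second set of measurements is meant to expose.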
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)})
        print("first set of iterations")
        # Result-key format below is reconstructed from the upstream benchmark script
        # and is an assumption; only "times[<label>] = <timed call>" is certain.
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 350 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap that also tracks each vertex's slot via node_position."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
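    # Invariant kept by every method above: heap[i] is the tentative distance of the
    # vertex positions[i], and node_position[v] is the slot where vertex v lives.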
def prims_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
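# With one heap entry per vertex, delete_minimum/bottom_to_top cost O(log V) each,
# so the loop above runs in O((V + E) log V) for an adjacency-list graph.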
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]

        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
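    # Hypothetical session: entering 3 edges "0 1 4", "1 2 2", "0 2 5" builds a
    # triangle whose minimum spanning tree is printed as [(0, 1), (1, 2)].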
| 69 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
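# Design note: explicit ZeroPadding2D followed by a VALID convolution reproduces
# PyTorch-style symmetric padding for any stride, avoiding the asymmetric padding
# Keras applies with padding="same" when stride > 1.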
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem): a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder")

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 projection shortcut used to match channels and/or downsample the residual."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
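# Squeeze-and-excitation in three steps: global-average-pool to one descriptor per
# channel, squeeze it through a bottleneck of 1x1 convs (relu then sigmoid), and
# rescale the input feature map channel-wise with the resulting gates.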
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: three convolutions, like a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0"))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Property name `input_signature` is an assumption: the original identifier was
        # mangled, and this is the TFPreTrainedModel hook that returns TensorSpecs.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
__a: Any = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
__a: Dict = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
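# Minimal usage sketch (assumes Hub access; `image` is any PIL image):
#   from transformers import AutoImageProcessor, TFRegNetModel
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
#   outputs = model(**processor(images=image, return_tensors="tf"))
#   outputs.last_hidden_state.shape  # (batch, channels, height, width), NCHW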
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 198 | '''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Tests for the 0/1 knapsack implementation imported above as `k`."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 198 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if `number` has no divisor between 2 and its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` of the cube-difference form 3n^2 + 3n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
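# Why the candidate update works: (n + 1)**3 - n**3 = 3n**2 + 3n + 1, and consecutive
# candidates differ by 6(n + 1) -- exactly the `6 * cube_index` added after the
# increment. Starting from 7 (n = 1), every cube-difference candidate is visited.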
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 238 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Return the largest prime below `ceiling` expressible as a sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
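# Starting the inner scan at `i + length` skips windows shorter than the best run
# found so far; only sums that could extend the current record are ever evaluated.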
if __name__ == "__main__":
print(F"{solution() = }")
| 345 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, usable with any model that has a language modeling head."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input")
    def ensure_exactly_one_mask_token(self, model_inputs):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True)["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it")
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`.")
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
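# Usage sketch (requires downloading a checkpoint from the Hub):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)
#   # -> two dicts with "score", "token", "token_str" and the filled "sequence"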
| 100 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Class name reconstructed from the pipeline under test; the elided mixin base is
    # assumed to be PipelineTesterMixin (imported above).
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        # clip_sample / set_alpha_to_one restored to the False values these fast tests
        # conventionally use; the mangled source had placeholders here.
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    # Test method names below are reconstructed; the bodies match the mangled source.
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2 | 366 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and audio feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
__lowercase : Tuple = None
if images is not None:
__lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a )
if images_mixed is not None:
__lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a )
if audio is not None:
__lowercase : Optional[Any] = self.feature_extractor(
__a , *__a , sampling_rate=__a , mask_audio=__a , **__a )
__lowercase : Tuple = {}
if audio is not None:
output_dict.update(__a )
if images is not None:
output_dict.update(__a )
if images_mixed_dict is not None:
output_dict.update(__a )
return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names)) | 306 | 0 |
def min_path_sum(grid: list) -> int:
    # `min_path_sum` / `fill_row` are reconstructed names for the two mangled
    # functions; the dynamic programming itself is unchanged.
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information')

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
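# Worked example: grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] accumulates to a final
# row of [6, 8, 7], so min_path_sum returns 7 (the path 1 -> 3 -> 1 -> 1 -> 1).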
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# The benchmarked wrappers deliberately shadow the `map`/`filter` builtins so the
# timed function names match the dataset methods they exercise.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        # Result keys below are reconstructed from the upstream benchmark script and
        # are an assumption; only the "times[<label>] = <timed call>" shape is certain.
        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)
        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 327 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_UpperCamelCase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_UpperCamelCase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_UpperCamelCase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] ,)
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _UpperCamelCase ( self ,A ,A ,A=None ,A="uniform_average" ,A=True ):
UpperCAmelCase = mean_squared_error(
A ,A ,sample_weight=A ,multioutput=A ,squared=A )
return {"mse": mse}
| 234 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
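# Slowsort is deliberately pessimal ("multiply and surrender"): its cost satisfies
# T(n) = 2 T(n/2) + T(n - 1) + O(1), which grows faster than any polynomial in n.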
if __name__ == "__main__":
from doctest import testmod
testmod()
| 234 | 1 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """
    Min-heap of Node objects with O(1) index lookup (idx_of_element) and a
    name -> value map (heap_dict) for __getitem__ access.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
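    # decrease_key only needs sift_up: a strictly smaller key can never violate the
    # min-heap property with respect to the node's children, only its ancestors.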
a =Node("""R""", -1)
a =Node("""B""", 6)
a =Node("""A""", 3)
a =Node("""X""", 1)
a =Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
a =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | """simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    # note: the constant factor only scales the kernel; it is not a unit-normalised Gaussian
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
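# The im2col layout turns the whole convolution into a single
# (dst_height*dst_width, k*k) @ (k*k,) matrix-vector product, trading memory for a
# large speedup over a nested-loop implementation in pure Python.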
if __name__ == "__main__":
# read original image
    # read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
waitKey()
| 44 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates and measure them into two classical bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
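    # Both qubits are flipped by an X gate before measurement, so every shot should
    # collapse to |11>; the expected histogram is therefore {'11': 1000}.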
| 183 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
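# With this indirection, importing a model class from the package only pulls in the
# heavy torch-backed modeling file on first attribute access, not at import time.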
| 183 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spm_char.model"}
__A = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
__A = {
"microsoft/speecht5_asr": 1_024,
"microsoft/speecht5_tts": 1_024,
"microsoft/speecht5_vc": 1_024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
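# Minimal usage sketch (illustrative; assumes one of the checkpoints mapped above):
# tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
# ids = tokenizer("Hello world").input_ids  # character-level pieces plus a trailing </s>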
| 164 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 164 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model (FSNER)."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
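# Note: forward() returns two per-query-token probability distributions scored
# against the support tokens tagged as entity starts and ends; turning these into
# entity spans is left to downstream decoding code.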
| 111 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the coin to the answer
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
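# Caveat: this greedy strategy is optimal for canonical coin systems such as the
# Indian denominations above, but not in general -- with denominations [1, 3, 4]
# and a value of 6 it returns [4, 1, 1] instead of the optimal [3, 3].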
| 68 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIP tokenizer that can map one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}
    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
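# Illustrative usage sketch (checkpoint name and placeholder are examples only):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# ids = tokenizer("a photo of <cat-toy>").input_ids  # placeholder expands to 4 tokens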
| 68 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 6 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
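# Typical invocation via the transformers CLI (assuming the command is registered):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models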
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
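    # Note: a bitonic sorting network assumes the input length is a power of two;
    # pad or split other inputs before calling bitonic_sort.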
| 41 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
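# Worked examples: greatest_common_divisor(121, 11) == 11 and
# gcd_by_iterative(24, 36) == 12.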
| 41 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 371 |
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
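# The multiplier 1664525 and increment 1013904223 are the classic Numerical Recipes
# LCG constants; with modulus 2 << 31 (i.e. 2**32) they satisfy the Hull-Dobell
# conditions, so the generator attains its full period of 2**32.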
| 66 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 145 | """simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
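# Worked example: solution(2) evaluates C(4, 2) = 4!/(2! * 2!) = 6, the number of
# lattice paths through a 2x2 grid; the default solution(20) yields C(40, 20) = 137846528820.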
| 69 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Union[str, Any] = ()
UpperCAmelCase__ : List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 351 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result) == sorted(expected)
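# Sanity check: the expected MST has total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37,
# matching the classic 9-node example graph from CLRS.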
| 103 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve for whichever one of the three quantities is supplied as 0."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("Exactly one of conductivity, electron_conc and mobility must be 0")
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
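
# A minimal usage sketch (added for illustration; the input numbers below are
# made-up example values, not from the original module). Passing 0 for mobility
# asks the function to solve for it:
#
#   >>> electric_conductivity(conductivity=25.0, electron_conc=1e20, mobility=0)
#   ('mobility', 1.5605...)   # 25.0 / (1e20 * ELECTRON_CHARGE)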
| 238 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 247 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
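
# A minimal read-back sketch (not part of the original script): the feature names
# match the writer above, while the file pattern is an assumed placeholder.
#
#   def parse_fn(record_bytes):
#       schema = {
#           "input_ids": tf.io.VarLenFeature(tf.int64),
#           "attention_mask": tf.io.VarLenFeature(tf.int64),
#       }
#       return tf.io.parse_single_example(record_bytes, schema)
#
#   ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord")).map(parse_fn)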
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
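    # For example, with max_length=4 the tokenized batches [[1, 2, 3], [4, 5, 6, 7, 8]]
    # are concatenated into [1, 2, 3, 4, 5, 6, 7, 8] and re-split into the
    # fixed-length samples [[1, 2, 3, 4], [5, 6, 7, 8]].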
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 247 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
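
        # Concretely, with padding_idx = 1 the three real tokens above receive
        # positions 2, 3 and 4 (numbering starts just past the padding index),
        # while the padded slot keeps position padding_idx, i.e. [[2, 3, 4, 1]].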
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 41 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
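
# A short worked trace (added for illustration): counting the set bits of
# 0b1011 (= 11). Each `number &= number - 1` clears the lowest set bit:
#   0b1011 -> 0b1010 (count=1) -> 0b1000 (count=2) -> 0b0000 (count=3)
assert get_set_bits_count(0b1011) == 3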
| 306 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
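
# Note on the slicing above: torch.nn.MultiheadAttention stores the query, key
# and value projections stacked in a single (3 * hidden_size, hidden_size)
# matrix. With hidden_size = 256, rows 0:256 hold the query projection, rows
# 256:512 the key projection, and the last 256 rows ([-256:]) the value projection.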
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
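
# For instance (illustration only), a 1280x960 page converted with "detection"
# in the checkpoint URL gets scale = 800 / 1280 = 0.625 and is resized to
# 800x600, preserving the aspect ratio.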
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 365 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"
    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 287 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Remove duplicated entries from a model-doc section and sort it by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
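
# For example (illustration only), an input such as
#   [{"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/bert", "title": "BERT"},
#    {"local": "model_doc/albert", "title": "ALBERT"}]
# collapses the duplicated "bert" entry and returns the result sorted by title:
#   [{"local": "model_doc/albert", "title": "ALBERT"},
#    {"local": "model_doc/bert", "title": "BERT"}]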
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section in turn.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 234 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    original_image = Image.open(requests.get(url, stream=True).raw)

    return original_image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 234 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """A few sanity checks on the parsed arguments before any heavy lifting starts."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
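
# For instance (illustration only), one argument combination that satisfies the
# checks above for MLM distillation is:
#   --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --alpha_ce 0.5
#   --student_type distilbert --teacher_type bert --token_counts <existing file>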
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowercase , required=lowercase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowercase , required=lowercase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowercase , choices=["distilbert", "roberta", "gpt2"] , required=lowercase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowercase , required=lowercase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowercase , type=lowercase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowercase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowercase , required=lowercase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowercase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowercase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowercase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowercase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowercase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowercase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowercase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowercase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowercase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowercase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowercase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowercase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=lowercase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowercase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowercase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowercase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=lowercase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=lowercase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowercase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowercase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowercase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowercase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowercase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowercase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowercase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowercase , default=4000 , help="Checkpoint interval." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
sanity_checks(lowercase )
# ARGS #
init_gpu_params(lowercase )
set_seed(lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(lowercase ) , lowercase , indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE : Tuple = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE : List[str] = tokenizer.all_special_tokens.index(lowercase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = special_tok_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , "rb" ) as fp:
SCREAMING_SNAKE_CASE : Tuple = pickle.load(lowercase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , "rb" ) as fp:
SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(lowercase )
SCREAMING_SNAKE_CASE : List[str] = np.maximum(lowercase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(lowercase )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Dict = LmSeqsDataset(params=lowercase , data=lowercase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
SCREAMING_SNAKE_CASE : Any = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE : str = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
SCREAMING_SNAKE_CASE : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowercase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = student_model_class(lowercase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("Student loaded." )
# TEACHER #
SCREAMING_SNAKE_CASE : Dict = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowercase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowercase , lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowercase , lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = Distiller(
params=lowercase , dataset=lowercase , token_probs=lowercase , student=lowercase , teacher=lowercase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 319 |
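The `--temperature` and `--alpha_ce` flags above weight the soft-target component of the distillation objective. A minimal sketch of that temperature-scaled loss, assuming standard knowledge distillation; the function name is illustrative, not the repo's `Distiller` API:
import torch.nn.functional as F
def soft_target_loss(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between temperature-softened distributions; the T^2
    # factor keeps gradient magnitudes comparable across temperatures.
    t = temperature
    return F.kl_div(
        F.log_softmax(student_logits / t, dim=-1),
        F.softmax(teacher_logits / t, dim=-1),
        reduction="batchmean",
    ) * (t * t)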
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
snake_case = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , )
return preprocessor
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
SCREAMING_SNAKE_CASE : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE : int = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
return key_mapping
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE : Tuple = param.numpy()
SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |
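The `permute` calls in the parameter-replacement helper above follow from the kernel-layout mismatch between the two frameworks. A quick sanity check of that axis reordering, assuming TF's HWIO convention versus PyTorch's OIHW:
import numpy as np
import torch
tf_kernel = np.zeros((3, 3, 64, 128), dtype=np.float32)      # TF layout: (H, W, in, out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # PyTorch layout: (out, in, H, W)
assert pt_kernel.shape == (128, 64, 3, 3)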
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : int = 1000000 ) -> int:
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
lowerCamelCase_ = 0
lowerCamelCase_ = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCamelCase_ = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCamelCase_ = counter
if counter > pre_counter:
lowerCamelCase_ = inputa
lowerCamelCase_ = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 183 |
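The Collatz search above memoizes chain lengths in a plain dict; an equivalent, more compact sketch using `lru_cache` (recursion depth stays modest for the chains involved):
from functools import lru_cache
@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    # Terms in the Collatz chain starting at n, counting n itself.
    if n == 1:
        return 1
    return 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)
assert collatz_length(27) == 112  # the classic 111-step chain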
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( __snake_case ):
SCREAMING_SNAKE_CASE : Dict = """Salesforce/blip-image-captioning-base"""
SCREAMING_SNAKE_CASE : int = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
SCREAMING_SNAKE_CASE : Optional[int] = """image_captioner"""
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForVisionaSeq
SCREAMING_SNAKE_CASE : int = ["""image"""]
SCREAMING_SNAKE_CASE : Optional[Any] = ["""text"""]
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
requires_backends(self , ['vision'] )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : "Image" ) -> Dict:
return self.pre_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> str:
return self.model.generate(**__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
return self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0].strip()
| 183 | 1 |
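Using the captioning tool above would look roughly like the following; this is a hypothetical sketch (the concrete class name is an assumption) and is left commented out because it downloads model weights:
# from PIL import Image
# tool = ImageCaptioningTool()            # hypothetical name for the class above
# caption = tool(Image.open("cats.png"))  # encode -> generate -> decode
# print(caption)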
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a__ ( __lowercase ) -> int:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def a__ ( __lowercase ) -> List[str]:
_A = emb.weight.shape
_A = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase=None ) -> int:
_A = {}
for old_key in state_dict.keys():
_A = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_A = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
_A = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
_A = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
_A = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
_A = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
_A = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
_A = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
_A = key.replace("final_layer_norm" , "ff_layer_norm" )
_A = state_dict[old_key]
return new_dict
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = WEIGHTS_NAME ) -> Any:
_A = []
_A = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
for expert in range(__lowerCamelCase ):
_A = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(__lowerCamelCase ):
_A = torch.load(__lowerCamelCase )["model"]
remove_ignore_keys_(__lowerCamelCase )
_A = rename_fairseq_keys(__lowerCamelCase , __lowerCamelCase )
_A = os.path.join(
__lowerCamelCase , weights_name.replace(".bin" , f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) )
torch.save(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowerCamelCase )[0]].dtype )
# Add the last block
_A = os.path.join(__lowerCamelCase , weights_name.replace(".bin" , f"""-{len(__lowerCamelCase )+1:05d}-of-???.bin""" ) )
_A = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__lowerCamelCase )
_A = rename_fairseq_keys(__lowerCamelCase , __lowerCamelCase )
_A = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowerCamelCase ) == 1:
_A = os.path.join(__lowerCamelCase , __lowerCamelCase )
torch.save(__lowerCamelCase , __lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowerCamelCase , __lowerCamelCase )
# Otherwise, let's build the index
_A = {}
for idx, shard in enumerate(__lowerCamelCase ):
_A = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin""" )
_A = os.path.join(__lowerCamelCase , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
for key in shard:
_A = shard_file
# Add the metadata
_A = {"total_size": total_size}
_A = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , "w" , encoding="utf-8" ) as f:
_A = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + "\n"
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
a_ = parser.parse_args()
a_ , a_ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 351 |
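The rewrite rules in `rename_fairseq_keys` above compose plain string replacements; a tiny worked example of the expert rewrite (layer and expert indices are illustrative):
old = "layers.3.moe_layer.experts.0.fc1.weight"
new = old.replace("moe_layer.experts.0", "ffn.experts.expert_7")
new = new.replace(".fc1.", ".ffn.fc1.")
assert new == "layers.3.ffn.experts.expert_7.ffn.fc1.weight"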
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
a_ = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = SavedModel()
_A = []
with open(os.path.join(__lowercase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
_A = json.load(__lowercase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__lowercase )] )
with open(__lowercase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
_A = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_A = sorted(__lowercase )
_A = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__lowercase )
if strict and len(__lowercase ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(__lowercase ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*__lowercase , sep="\n" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
a_ = parser.parse_args()
if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 163 | 0 |
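Stripped of the protobuf parsing, the compatibility check in `onnx_compliancy` above reduces to a set difference; a minimal illustration with made-up op names:
model_ops = {"MatMul", "Relu", "MyCustomOp"}
onnx_ops = {"MatMul", "Relu"}
internal_ops = {"Assert", "ReadVariableOp"}
incompatible = sorted(op for op in model_ops if op not in onnx_ops and op not in internal_ops)
assert incompatible == ["MyCustomOp"]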
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCAmelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[int]:
for attribute in key.split("""."""):
__snake_case: Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if weight_type is not None:
__snake_case: List[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).shape
else:
__snake_case: List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__snake_case: Optional[Any] = value
elif weight_type == "weight_g":
__snake_case: Optional[Any] = value
elif weight_type == "weight_v":
__snake_case: Tuple = value
elif weight_type == "bias":
__snake_case: List[str] = value
else:
__snake_case: Dict = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Any:
__snake_case: str = []
__snake_case: Union[str, Any] = fairseq_model.state_dict()
__snake_case: Dict = hf_model.feature_extractor
__snake_case: Dict = hf_model.adapter
for name, value in fairseq_dict.items():
__snake_case: List[str] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , )
__snake_case: Optional[int] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""]):
load_adapter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
__snake_case: Any = True
if "*" in mapped_key:
__snake_case: Any = name.split(SCREAMING_SNAKE_CASE__)[0].split(""".""")[-2]
__snake_case: List[str] = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__)
if "weight_g" in name:
__snake_case: Optional[int] = """weight_g"""
elif "weight_v" in name:
__snake_case: Tuple = """weight_v"""
elif "bias" in name:
__snake_case: Optional[Any] = """bias"""
elif "weight" in name:
__snake_case: Optional[int] = """weight"""
else:
__snake_case: Dict = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__)
logger.warning(F'''Unused weights: {unused_weights}''')
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Dict = full_name.split("""conv_layers.""")[-1]
__snake_case: List[Any] = name.split(""".""")
__snake_case: List[str] = int(items[0])
__snake_case: Optional[Any] = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__snake_case: Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__snake_case: Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__snake_case: Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__snake_case: Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[Any]:
__snake_case: Optional[Any] = full_name.split("""adaptor.""")[-1]
__snake_case: Tuple = name.split(""".""")
if items[1].isdigit():
__snake_case: Tuple = int(items[1])
else:
__snake_case: Tuple = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
__snake_case: int = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
__snake_case: str = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
__snake_case: Union[str, Any] = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
__snake_case: Union[str, Any] = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''')
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
__snake_case: Optional[Any] = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
__snake_case: int = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
else:
unused_weights.append(SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[Any]:
__snake_case , __snake_case: Optional[Any] = emb.weight.shape
__snake_case: List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__)
__snake_case: str = emb.weight.data
return lin_layer
@torch.no_grad()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Dict:
__snake_case: Tuple = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , add_adapter=SCREAMING_SNAKE_CASE__ , adapter_stride=SCREAMING_SNAKE_CASE__ , adapter_kernel_size=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , output_hidden_size=SCREAMING_SNAKE_CASE__ , )
__snake_case: int = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
# load model
__snake_case , __snake_case , __snake_case: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""")[:-1]),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
__snake_case: List[Any] = model[0].eval()
# load feature extractor
__snake_case: str = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__)
# set weights for wav2vec2 encoder
__snake_case: Dict = WavaVecaModel(SCREAMING_SNAKE_CASE__)
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__)
# load decoder weights
__snake_case: Union[str, Any] = MBartForCausalLM(SCREAMING_SNAKE_CASE__)
__snake_case , __snake_case: Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__)
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
__snake_case: Dict = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = False
__snake_case: Union[str, Any] = MBartaaTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = hf_wavavec.config.to_dict()
__snake_case: int = tokenizer.pad_token_id
__snake_case: Union[str, Any] = tokenizer.bos_token_id
__snake_case: Tuple = tokenizer.eos_token_id
__snake_case: List[Any] = """mbart50"""
__snake_case: Optional[int] = """wav2vec2"""
__snake_case: Union[str, Any] = tokenizer.eos_token_id
__snake_case: str = 25_0004
__snake_case: Optional[Any] = tokenizer.eos_token_id
__snake_case: List[Any] = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__)
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__)
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
__UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
__UpperCAmelCase : Any = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 111 |
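The embedding-to-linear helper above ties the language-model head to the decoder embedding matrix; a self-contained sketch of that weight sharing (sizes are illustrative):
import torch
from torch import nn
emb = nn.Embedding(100, 16)
lm_head = nn.Linear(16, 100, bias=False)
lm_head.weight.data = emb.weight.data     # share the embedding matrix
hidden = torch.randn(2, 16)
assert lm_head(hidden).shape == (2, 100)  # logits over the vocabulary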
from __future__ import annotations
import typing
from collections import Counter
def A__ ( SCREAMING_SNAKE_CASE__) -> typing.Counter[int]:
__snake_case: typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1):
for perpendicular in range(SCREAMING_SNAKE_CASE__ , max_perimeter + 1):
__snake_case: Dict = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(SCREAMING_SNAKE_CASE__):
__snake_case: Any = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def A__ ( SCREAMING_SNAKE_CASE__ = 1000) -> int:
__snake_case: List[str] = pythagorean_triple(SCREAMING_SNAKE_CASE__)
return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
| 111 | 1 |
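A quick check of the triplet counter above: perimeter 120 admits three integer right triangles, so it exercises the `Counter` logic even at small limits:
for a, b, c in [(20, 48, 52), (24, 45, 51), (30, 40, 50)]:
    assert a * a + b * b == c * c and a + b + c == 120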
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = IFInpaintingSuperResolutionPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Optional[int] = torch.manual_seed(lowercase )
else:
A_ : Tuple = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : int = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
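The dummy-input helper above seeds a `torch.Generator` so the random tensors are reproducible across test runs; the pattern in isolation:
import torch
def seeded_randn(seed):
    gen = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(2, 2, generator=gen)
assert torch.equal(seeded_randn(0), seeded_randn(0))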
| 192 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''mask2former'''
lowerCamelCase_ = ['''swin''']
lowerCamelCase_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowercase = None , lowercase = 2_5_6 , lowercase = 2_5_6 , lowercase = 2_5_6 , lowercase = 1_0_2_4 , lowercase = "relu" , lowercase = 6 , lowercase = 1_0 , lowercase = 8 , lowercase = 0.0 , lowercase = 2_0_4_8 , lowercase = False , lowercase = False , lowercase = 4 , lowercase = 2_5_5 , lowercase = 1_0_0 , lowercase = 0.1 , lowercase = 2.0 , lowercase = 5.0 , lowercase = 5.0 , lowercase = 1_2_5_4_4 , lowercase = 3.0 , lowercase = 0.75 , lowercase = 0.02 , lowercase = 1.0 , lowercase = True , lowercase = [4, 8, 1_6, 3_2] , lowercase = None , **lowercase , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
A_ : List[str] = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowercase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowercase , lowercase ):
A_ : str = backbone_config.pop('model_type' )
A_ : List[str] = CONFIG_MAPPING[backbone_model_type]
A_ : Tuple = config_class.from_dict(lowercase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
A_ : List[Any] = backbone_config
A_ : Optional[Any] = feature_size
A_ : int = mask_feature_size
A_ : Tuple = hidden_dim
A_ : Dict = encoder_feedforward_dim
A_ : int = activation_function
A_ : str = encoder_layers
A_ : Tuple = decoder_layers
A_ : Tuple = num_attention_heads
A_ : str = dropout
A_ : List[str] = dim_feedforward
A_ : List[str] = pre_norm
A_ : Tuple = enforce_input_projection
A_ : Dict = common_stride
A_ : Union[str, Any] = ignore_value
A_ : List[Any] = num_queries
A_ : List[Any] = no_object_weight
A_ : int = class_weight
A_ : int = mask_weight
A_ : Optional[Any] = dice_weight
A_ : int = train_num_points
A_ : Optional[int] = oversample_ratio
A_ : Tuple = importance_sample_ratio
A_ : Union[str, Any] = init_std
A_ : List[Any] = init_xavier_std
A_ : Optional[Any] = use_auxiliary_loss
A_ : Dict = feature_strides
A_ : List[Any] = output_auxiliary_logits
A_ : Optional[int] = decoder_layers
super().__init__(**lowercase )
@classmethod
def lowerCAmelCase_ ( cls , lowercase , **lowercase ):
"""simple docstring"""
return cls(
backbone_config=lowercase , **lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = copy.deepcopy(self.__dict__ )
A_ : Optional[int] = self.backbone_config.to_dict()
A_ : Dict = self.__class__.model_type
return output
| 192 | 1 |
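Serializing the config above round-trips the nested backbone config through `to_dict`; a hypothetical usage sketch, left commented out since it needs `transformers` installed:
# from transformers import Mask2FormerConfig
# cfg = Mask2FormerConfig()   # defaults to the Swin backbone described above
# d = cfg.to_dict()
# assert d["model_type"] == "mask2former"
# assert d["backbone_config"]["model_type"] == "swin"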
import datasets
from .evaluate import evaluate
A : str = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
A : Any = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
A : str = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
__a = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
__a = evaluate(dataset=_snake_case , predictions=_snake_case )
        return score
| 6 |
def __lowerCAmelCase ( a__ , a__ , a__ ) -> list:
__a = len(a__ )
__a = [[0] * n for i in range(a__ )]
for i in range(a__ ):
__a = y_points[i]
for i in range(2 , a__ ):
for j in range(a__ , a__ ):
__a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 | 1 |
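A worked check of the Neville routine above on the line y = 2x, written self-contained with zero-based indexing (restoring the tableau assignments the mangled names above obscure):
def neville(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]  # first column holds the sample values
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]
assert neville([1, 2, 3], [2, 4, 6], 5) == 10.0  # interpolates exactly on a line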
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 360 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 5_02_57 , lowerCAmelCase = 10_24 , lowerCAmelCase = 7_68 , lowerCAmelCase = 12 , lowerCAmelCase = 12 , lowerCAmelCase = None , lowerCAmelCase = "gelu_new" , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 0.02 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__()
snake_case = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
snake_case = prefix_inner_dim
snake_case = prefix_hidden_dim
snake_case = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case = (
nn.Linear(self.prefix_hidden_dim , lowerCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case = GPTaConfig(
vocab_size=lowerCAmelCase , n_positions=lowerCAmelCase , n_embd=lowerCAmelCase , n_layer=lowerCAmelCase , n_head=lowerCAmelCase , n_inner=lowerCAmelCase , activation_function=lowerCAmelCase , resid_pdrop=lowerCAmelCase , embd_pdrop=lowerCAmelCase , attn_pdrop=lowerCAmelCase , layer_norm_epsilon=lowerCAmelCase , initializer_range=lowerCAmelCase , scale_attn_weights=lowerCAmelCase , use_cache=lowerCAmelCase , scale_attn_by_inverse_layer_idx=lowerCAmelCase , reorder_and_upcast_attn=lowerCAmelCase , )
snake_case = GPTaLMHeadModel(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = self.transformer.transformer.wte(lowerCAmelCase )
snake_case = self.encode_prefix(lowerCAmelCase )
snake_case = self.decode_prefix(lowerCAmelCase )
snake_case = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case = self.transformer(inputs_embeds=lowerCAmelCase , labels=lowerCAmelCase , attention_mask=lowerCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
        return torch.zeros(lowerCAmelCase , self.prefix_length , dtype=torch.int64 , device=lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.encode_prefix(lowerCAmelCase )
@torch.no_grad()
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = torch.split(lowerCAmelCase , 1 , dim=0 )
snake_case = []
snake_case = []
for feature in features:
snake_case = self.decode_prefix(feature.to(lowerCAmelCase ) ) # back to the clip feature
# Only support beam search for now
snake_case ,snake_case = self.generate_beam(
input_embeds=lowerCAmelCase , device=lowerCAmelCase , eos_token_id=lowerCAmelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case = torch.stack(lowerCAmelCase )
snake_case = torch.stack(lowerCAmelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = 5 , lowerCAmelCase = 67 , lowerCAmelCase = 1.0 , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = eos_token_id
snake_case = None
snake_case = None
snake_case = torch.ones(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.int )
snake_case = torch.zeros(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.bool )
if input_embeds is not None:
snake_case = input_embeds
else:
snake_case = self.transformer.transformer.wte(lowerCAmelCase )
for i in range(lowerCAmelCase ):
snake_case = self.transformer(inputs_embeds=lowerCAmelCase )
snake_case = outputs.logits
snake_case = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case = logits.softmax(-1 ).log()
if scores is None:
snake_case ,snake_case = logits.topk(lowerCAmelCase , -1 )
snake_case = generated.expand(lowerCAmelCase , *generated.shape[1:] )
snake_case ,snake_case = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
snake_case = next_tokens
else:
snake_case = tokens.expand(lowerCAmelCase , *tokens.shape[1:] )
snake_case = torch.cat((tokens, next_tokens) , dim=1 )
else:
snake_case = -float(np.inf )
snake_case = 0
snake_case = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case = scores_sum / seq_lengths[:, None]
snake_case ,snake_case = scores_sum_average.view(-1 ).topk(lowerCAmelCase , -1 )
snake_case = next_tokens // scores_sum.shape[1]
snake_case = seq_lengths[next_tokens_source]
snake_case = next_tokens % scores_sum.shape[1]
snake_case = next_tokens.unsqueeze(1 )
snake_case = tokens[next_tokens_source]
snake_case = torch.cat((tokens, next_tokens) , dim=1 )
snake_case = generated[next_tokens_source]
snake_case = scores_sum_average * seq_lengths
snake_case = is_stopped[next_tokens_source]
snake_case = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
snake_case = torch.cat((generated, next_token_embed) , dim=1 )
snake_case = is_stopped + next_tokens.eq(lowerCAmelCase ).squeeze()
if is_stopped.all():
break
snake_case = scores / seq_lengths
snake_case = scores.argsort(descending=lowerCAmelCase )
# tokens tensors are already padded to max_seq_length
snake_case = [tokens[i] for i in order]
snake_case = torch.stack(lowerCAmelCase , dim=0 )
snake_case = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 149 | 0 |
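The beam search in the text decoder above ranks hypotheses by length-normalized cumulative log-probability (the `scores_sum / seq_lengths` step). A minimal, self-contained sketch of that scoring rule on toy values (all numbers are illustrative, not taken from the model):

import torch

# Toy cumulative log-probs for 3 beams and their current lengths.
scores_sum = torch.tensor([-2.0, -3.6, -5.1])
seq_lengths = torch.tensor([2.0, 4.0, 6.0])

# Length-normalized score, as in `scores_sum / seq_lengths[:, None]` above.
avg_scores = scores_sum / seq_lengths
best = avg_scores.argmax().item()
print(avg_scores, best)  # beam 2 wins despite having the lowest raw total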
import math
class SelfOrganizingMap:
    '''simple docstring'''

    def get_winner( self , weights , sample ) -> int:
        # Squared Euclidean distance of the sample to each of the two weight vectors.
        d_a = 0.0
        d_b = 0.0
        for i in range(len(sample ) ):
            d_a += math.pow((sample[i] - weights[0][i]) , 2 )
            d_b += math.pow((sample[i] - weights[1][i]) , 2 )
        # Compare the full distances only after the loop has finished.
        return 0 if d_a > d_b else 1

    def update( self , weights , sample , j , alpha ):
        # Pull the winning weight vector towards the sample.
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""" )
    print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main() | 262 |
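The winner selection above is a nearest-neighbour test under squared Euclidean distance. The same computation, written as a standalone check (sample and weights taken from the training script above):

import math

def squared_distance(sample, weight):
    return sum(math.pow(s - w, 2) for s, w in zip(sample, weight))

sample = [0, 0, 0, 1]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
dists = [squared_distance(sample, w) for w in weights]
print(dists.index(min(dists)))  # index of the winning cluster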
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :Union[str, Any] = controlnet_params
snake_case_ :Union[str, Any] = """bird"""
snake_case_ :List[Any] = jax.device_count()
snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ :Any = jax.random.PRNGKey(0 )
snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() )
snake_case_ :List[Any] = replicate(snake_case )
snake_case_ :List[str] = shard(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :Dict = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1]
snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Dict = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :str = controlnet_params
snake_case_ :Optional[int] = """Chef in the kitchen"""
snake_case_ :Union[str, Any] = jax.device_count()
snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ :str = jax.random.PRNGKey(0 )
snake_case_ :str = jax.random.split(snake_case , jax.device_count() )
snake_case_ :Tuple = replicate(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :int = shard(snake_case )
snake_case_ :List[str] = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :int = images[0, 253:256, 253:256, -1]
snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Optional[int] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 66 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "markuplm"
def __init__( self: int , UpperCamelCase: List[Any]=3_05_22 , UpperCamelCase: str=7_68 , UpperCamelCase: Dict=12 , UpperCamelCase: List[str]=12 , UpperCamelCase: Tuple=30_72 , UpperCamelCase: Dict="gelu" , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: str=0.1 , UpperCamelCase: Optional[int]=5_12 , UpperCamelCase: List[Any]=2 , UpperCamelCase: List[str]=0.02 , UpperCamelCase: Dict=1e-1_2 , UpperCamelCase: Dict=0 , UpperCamelCase: Optional[Any]=0 , UpperCamelCase: Tuple=2 , UpperCamelCase: Optional[Any]=2_56 , UpperCamelCase: int=10_24 , UpperCamelCase: List[str]=2_16 , UpperCamelCase: Any=10_01 , UpperCamelCase: Tuple=32 , UpperCamelCase: List[str]=50 , UpperCamelCase: Dict="absolute" , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=None , **UpperCamelCase: Tuple , ):
"""simple docstring"""
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
# additional properties
A__ = max_depth
A__ = max_xpath_tag_unit_embeddings
A__ = max_xpath_subs_unit_embeddings
A__ = tag_pad_id
A__ = subs_pad_id
A__ = xpath_unit_hidden_size
| 69 |
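A quick sketch of instantiating the configuration above, assuming a transformers release that ships MarkupLM; the printed defaults mirror the signature shown:

# Assumes a recent `transformers` with MarkupLM support installed.
from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.tag_pad_id, config.subs_pad_id)  # defaults above: 216, 1001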
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 69 | 1 |
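The `_LazyModule` pattern above defers heavy imports until an attribute is first accessed. A standalone sketch of the same idea using PEP 562 module `__getattr__` (the helper and demo names are hypothetical, not the transformers implementation):

import importlib
import types

def make_lazy_module(name: str, import_structure: dict) -> types.ModuleType:
    """Return a module that imports submodules only on first attribute access."""
    attr_to_submodule = {attr: sub for sub, attrs in import_structure.items() for attr in attrs}
    module = types.ModuleType(name)

    def __getattr__(attr):
        submodule = attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {name!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(submodule), attr)

    module.__getattr__ = __getattr__
    return module

# Hypothetical usage: `json.dumps` is resolved only when first touched.
lazy = make_lazy_module("lazy_demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": True}))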
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE000
SEP = 0XE001
BOS = 0XE002
MASK = 0XE003
RESERVED = 0XE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer ( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS) , eos_token=chr(SEP) , sep_token=chr(SEP) , cls_token=chr(CLS) , pad_token=chr(PAD) , mask_token=chr(MASK) , add_prefix_space=False , model_max_length=2_0_4_8 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def UpperCAmelCase__ ( self : str):
return self._unicode_vocab_size
def UpperCAmelCase__ ( self : List[str] , A_ : str):
return list(A_)
    def UpperCAmelCase__ ( self : str , token : str):
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F"""invalid token: '{token}'""")
    def UpperCAmelCase__ ( self : str , index : int):
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F"""invalid id: {index}""")
def UpperCAmelCase__ ( self : str , A_ : List[str]):
return "".join(A_)
    def UpperCAmelCase__ ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result
    def UpperCAmelCase__ ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result
    def UpperCAmelCase__ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result
def UpperCAmelCase__ ( self : Optional[Any] , A_ : str , A_ : Optional[str] = None):
return ()
| 103 |
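CANINE tokenizes directly to Unicode codepoints, so encoding and decoding reduce to `ord` and `chr`. A sketch of the round-trip:

text = "héllo"
token_ids = [ord(ch) for ch in text]       # characters -> Unicode codepoints
print(token_ids)                           # [104, 233, 108, 108, 111]
print("".join(chr(i) for i in token_ids))  # round-trips to "héllo"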
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    parser = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' ,type=int ,default=1 ,help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
        '''training_script''' ,type=str ,help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) ,)
# rest from the training program
    parser.add_argument('''training_script_args''' ,nargs=REMAINDER )
return parser.parse_args()
def main( ):
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 103 | 1 |
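The launcher above imports the training script as a module and patches `sys.argv` before spawning TPU processes. A standalone sketch of the argv-patching step (function names are illustrative):

import sys

def run_with_patched_argv(fn, argv):
    """Temporarily replace sys.argv, as the launcher does before xmp.spawn."""
    old_argv = sys.argv
    sys.argv = list(argv)
    try:
        return fn()
    finally:
        sys.argv = old_argv

def fake_entry_point():
    # Stand-in for the training script's main; names are illustrative.
    return sys.argv[1:]

print(run_with_patched_argv(fake_entry_point, ["train.py", "--tpu_num_cores", "8"]))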
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=__snake_case ):
__lowerCamelCase = ['flax', 'transformers']
def __init__( self :Tuple , *_lowercase :Tuple , **_lowercase :int ):
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , *_lowercase :Any , **_lowercase :Dict ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :Optional[int] , *_lowercase :str , **_lowercase :Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase ( metaclass=__snake_case ):
__lowerCamelCase = ['flax', 'transformers']
def __init__( self :Tuple , *_lowercase :Union[str, Any] , **_lowercase :Any ):
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :int , *_lowercase :Optional[int] , **_lowercase :int ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :Optional[int] , *_lowercase :List[str] , **_lowercase :int ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase ( metaclass=__snake_case ):
__lowerCamelCase = ['flax', 'transformers']
def __init__( self :int , *_lowercase :int , **_lowercase :Tuple ):
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :Tuple , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , *_lowercase :Tuple , **_lowercase :Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase ( metaclass=__snake_case ):
__lowerCamelCase = ['flax', 'transformers']
def __init__( self :Tuple , *_lowercase :Optional[int] , **_lowercase :Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :List[Any] , *_lowercase :List[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase ( cls :str , *_lowercase :List[str] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["flax", "transformers"] )
| 362 |
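Each placeholder class above fails loudly when its optional backends are missing. A simplified sketch of the underlying metaclass trick (not the exact diffusers implementation):

class DummyObject(type):
    """Metaclass that makes any missing class-attribute access raise ImportError."""
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires: {', '.join(cls._backends)}")

class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

try:
    FlaxStableDiffusionPipeline.from_pretrained  # any attribute access fails fast
except ImportError as err:
    print(err)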
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats( url = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 201 | 0 |
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 247 |
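A brute-force cross-check of the articulation-point routine above: a vertex is an articulation point exactly when removing it disconnects the graph. A sketch, assuming an undirected adjacency-list graph:

def is_connected_without(graph, removed):
    """DFS over `graph` skipping `removed`; True if the rest stays connected."""
    nodes = [v for v in graph if v != removed]
    if not nodes:
        return True
    seen, stack = {nodes[0]}, [nodes[0]]
    while stack:
        for nxt in graph[stack.pop()]:
            if nxt != removed and nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return len(seen) == len(nodes)

graph = {0: [1], 1: [0, 2], 2: [1]}  # path 0-1-2: only vertex 1 is an AP
print([v for v in graph if not is_connected_without(graph, v)])  # [1]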
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class UpperCAmelCase_ ( unittest.TestCase ):
    def one_complete_example( self : List[Any] , complete_file_name : str , parser_only : bool , secondary_filename : str = None , special_strings : list = None ) -> Tuple:
        '''simple docstring'''
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples" , "by_feature" ) )
        examples_path = os.path.abspath("examples" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="main()" if parser_only else "training_function()" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , special_strings )
                        diff = "\n".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , "" )
                        self.assertEqual(diff , "" )
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
self.one_complete_example("complete_nlp_example.py" , snake_case_ )
self.one_complete_example("complete_nlp_example.py" , snake_case_ )
def __magic_name__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
A__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , snake_case_ , snake_case_ , snake_case_ )
self.one_complete_example("complete_cv_example.py" , snake_case_ , snake_case_ , snake_case_ )
@mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase_ ( A_ ):
lowercase__ = False
@classmethod
def __magic_name__ ( cls : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUpClass()
A__ = tempfile.mkdtemp()
A__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
A__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __magic_name__ ( cls : Dict ) -> str:
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __magic_name__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
A__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def __magic_name__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
self.assertNotIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
if torch.cuda.is_available():
A__ = torch.cuda.device_count()
else:
A__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
else:
self.assertIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
@slow
def __magic_name__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
A__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
A__ = re.findall("({.+})" , snake_case_ )
A__ = [r for r in results if "accuracy" in r][-1]
A__ = ast.literal_eval(snake_case_ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def __magic_name__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
A__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , "tracking" ) ) )
def __magic_name__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def __magic_name__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 247 | 1 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self , trainer , n_trials : int , direction : str , **kwargs ):
        raise NotImplementedError

    def default_hp_space(self , trial ):
        raise NotImplementedError

    def ensure_available(self ):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls ):
        return f'''`pip install {cls.pip_package or cls.name}`'''


class OptunaBackend(HyperParamSearchBackendBase ):
    name = """optuna"""

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self , trainer , n_trials : int , direction : str , **kwargs ):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial ):
        return default_hp_space_optuna(trial )


class RayTuneBackend(HyperParamSearchBackendBase ):
    name = """ray"""
    pip_package = """'ray[tune]'"""

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self , trainer , n_trials : int , direction : str , **kwargs ):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial ):
        return default_hp_space_ray(trial )


class SigOptBackend(HyperParamSearchBackendBase ):
    name = """sigopt"""

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self , trainer , n_trials : int , direction : str , **kwargs ):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial ):
        return default_hp_space_sigopt(trial )


class WandbBackend(HyperParamSearchBackendBase ):
    name = """wandb"""

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self , trainer , n_trials : int , direction : str , **kwargs ):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial ):
        return default_hp_space_wandb(trial )


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend( ) -> str:
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 161 |
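The fallback helper above picks the first available backend. The selection logic, sketched with stub backends:

class StubBackend:
    def __init__(self, name, available):
        self.name, self._available = name, available
    def is_available(self):
        return self._available

backends = [StubBackend("optuna", False), StubBackend("ray", True), StubBackend("wandb", True)]
available = [b for b in backends if b.is_available()]
if not available:
    raise RuntimeError("No hyperparameter search backend available.")
print(available[0].name)  # "ray": the first available backend wins, mirroring the helper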
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree :
'''simple docstring'''
    def __init__( self : Dict , size : int ):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def left( self : List[Any] , idx : int ):
        return idx * 2

    def right( self : Tuple , idx : int ):
        return idx * 2 + 1

    def build( self : Tuple , idx : int , left_element : int , right_element : int , a : list[int] ):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )

    def update( self : Union[str, Any] , idx : int , left_element : int , right_element : int , a : int , b : int , val : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True

    def query( self : int , idx : int , left_element : int , right_element : int , a : int , b : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )

    def __str__( self : str ):
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 161 | 1 |
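The segment tree above implements range assignment with lazy propagation and range-max queries. A plain-list cross-check of the queries run in the demo (1-indexed, inclusive bounds, matching `update`/`query` above):

A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]

def assign(arr, left, right, val):   # 1-indexed, inclusive, like update()
    for i in range(left - 1, right):
        arr[i] = val

def range_max(arr, left, right):     # 1-indexed, inclusive, like query()
    return max(arr[left - 1:right])

print(range_max(A, 4, 6))    # 7
print(range_max(A, 7, 11))   # 14
assign(A, 1, 3, 111)
print(range_max(A, 1, 15))   # 111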
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = DistilBertTokenizer
UpperCAmelCase__ = DistilBertTokenizerFast
UpperCAmelCase__ = True
@slow
def A_ ( self : str ) -> Any:
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 50 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
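When the requested shortest edge is below 384, the processor above first resizes the shortest edge to `shortest_edge / crop_pct` and only then center-crops. Worked arithmetic for the classic 224/256 setting:

shortest_edge = 224
crop_pct = 224 / 256          # the default fallback used above
resize_to = int(shortest_edge / crop_pct)
print(resize_to)              # 256: resize shortest edge to 256, then center-crop 224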
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin ( TFGenerationMixin ):
"""simple docstring"""
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , FutureWarning , )
| 70 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
    """simple docstring"""

    data: int
    next_node: Node | None
class SortedLinkedList :
"""simple docstring"""
    def __init__( self :str , ints :Iterable[int] ):
        '''simple docstring'''
        self.head : Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self :Any ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self :Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
    def __str__( self :Tuple ):
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists( list_a : SortedLinkedList , list_b : SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(list_a ) + list(list_b ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 70 | 1 |
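`merge_lists` above concatenates and re-sorts, which costs O((m+n) log(m+n)). A linear-time two-pointer merge on plain lists, plus the stdlib equivalent:

import heapq

def merge_sorted(a, b):
    """Linear-time merge of two ascending lists (two-pointer alternative)."""
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i]); i += 1
        else:
            out.append(b[j]); j += 1
    return out + a[i:] + b[j:]

print(merge_sorted([-11, 0, 1, 3], [0, 2, 4, 6]))
# heapq.merge does the same lazily:
print(list(heapq.merge([-11, 0, 1, 3], [0, 2, 4, 6])))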
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main( ):
    parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__lowercase , required=__lowercase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__lowercase , required=__lowercase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__lowercase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowercase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__lowercase , required=__lowercase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__lowercase , type=__lowercase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowercase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__lowercase , required=__lowercase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__lowercase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__lowercase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__lowercase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowercase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowercase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__lowercase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=__lowercase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__lowercase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__lowercase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__lowercase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__lowercase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__lowercase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__lowercase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__lowercase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowercase , default=5_0 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__lowercase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowercase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__lowercase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowercase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__lowercase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__lowercase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__lowercase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__lowercase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__lowercase , default=5_6 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__lowercase , default=5_0_0 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__lowercase , default=4_0_0_0 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
        json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
    with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
        with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('''Data loader created.''' )
    # STUDENT #
    logger.info(F"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F"""cuda:{args.local_rank}""" )
    logger.info('''Student loaded.''' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F"""cuda:{args.local_rank}""" )
    logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 319 |
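The heavy lifting happens in the `Distiller` class imported above; its core objective is a temperature-scaled KL divergence between teacher and student logits. A minimal sketch of that loss term (temperature and tensor shapes are illustrative, not the full Distiller):

import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, temperature=2.0):
    """KL(student || teacher) on temperature-softened distributions, scaled by T^2."""
    return (
        F.kl_div(
            F.log_softmax(student_logits / temperature, dim=-1),
            F.softmax(teacher_logits / temperature, dim=-1),
            reduction="batchmean",
        )
        * temperature**2
    )

student = torch.randn(4, 100)
teacher = torch.randn(4, 100)
print(distillation_loss(student, teacher).item())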
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
UpperCamelCase = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
A: List[Any] = torch.load(__lowercase , map_location='''cpu''' )
return sd
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]:
A: Tuple = OrderedDict()
A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A: int = key
for name_pair in rename_keys_prefix:
A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
A: Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A: int = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
A: Optional[Any] = '''pretraining'''
if "vcr" in checkpoint_path:
A: Optional[int] = {'''visual_embedding_dim''': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8}
elif "vqa" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 2_0_4_8}
elif "nlvr" in checkpoint_path:
A: Tuple = {'''visual_embedding_dim''': 1_0_2_4}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 5_1_2}
A: List[str] = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
A: List[str] = {'''visual_embedding_dim''': 2_0_4_8}
A: Optional[int] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
A: Union[str, Any] = '''vqa'''
elif "nlvr" in checkpoint_path:
A: Optional[int] = {
'''visual_embedding_dim''': 1_0_2_4,
'''num_labels''': 2,
}
A: str = '''nlvr'''
A: Union[str, Any] = VisualBertConfig(**__lowercase )
# Load State Dict
A: Union[str, Any] = load_state_dict(__lowercase )
A: str = get_new_dict(__lowercase , __lowercase )
if model_type == "pretraining":
A: Optional[Any] = VisualBertForPreTraining(__lowercase )
elif model_type == "vqa":
A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase )
elif model_type == "nlvr":
A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase )
elif model_type == "multichoice":
A: Any = VisualBertForMultipleChoice(__lowercase )
model.load_state_dict(__lowercase )
# Save Checkpoints
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 319 | 1 |
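A self-contained sketch of the prefix-renaming pass that get_new_dict performs in the snippet above, using a toy state dict in place of a real checkpoint; the tensors and key names are made up for illustration.

from collections import OrderedDict

import torch

old_sd = OrderedDict(
    [
        ("bert.bert.encoder.layer.0.attention.self.query.weight", torch.zeros(2, 2)),
        ("detector.backbone.conv1.weight", torch.zeros(2, 2)),  # skipped below
    ]
)
rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]

new_sd = OrderedDict()
for key, value in old_sd.items():
    if "detector" in key:  # detection-head weights are dropped entirely
        continue
    new_key = key
    for old_prefix, new_prefix in rename_pairs:
        new_key = new_key.replace(old_prefix, new_prefix)
    new_sd[new_key] = value

print(list(new_sd))  # ['visual_bert.encoder.layer.0.attention.self.query.weight']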
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase__ ( yaml.SafeLoader ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCAmelCase_ : str = [tuple(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else key for key in keys]
lowerCAmelCase_ : Optional[Any] = Counter(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=False ):
lowerCAmelCase_ : int = super().construct_mapping(SCREAMING_SNAKE_CASE_ , deep=SCREAMING_SNAKE_CASE_ )
self._check_no_duplicates_on_constructed_node(SCREAMING_SNAKE_CASE_ )
return mapping
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> Tuple[Optional[str], str]:
"""simple docstring"""
lowerCAmelCase_ : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCAmelCase_ : List[str] = full_content[1:].index('---' ) + 1
lowerCAmelCase_ : Tuple = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_UpperCAmelCase )
class UpperCamelCase__ ( __UpperCamelCase ):
# class attributes
_SCREAMING_SNAKE_CASE = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , SCREAMING_SNAKE_CASE_ : Path ):
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as readme_file:
lowerCAmelCase_ : List[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(SCREAMING_SNAKE_CASE_ )
else:
return cls()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Path ):
if path.exists():
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as readme_file:
lowerCAmelCase_ : Any = readme_file.read()
else:
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : List[str] = self._to_readme(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if readme_content is not None:
lowerCAmelCase_ : int = _split_yaml_from_readme(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowerCAmelCase_ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Any = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCAmelCase_ : Tuple = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=SCREAMING_SNAKE_CASE_ , allow_unicode=SCREAMING_SNAKE_CASE_ , encoding='utf-8' , ).decode('utf-8' )
# Known task categories, each mapping to its registered sub-task ids (empty here).
# The variable name was obfuscated in the source; `known_task_ids` is assumed.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowercase__ : Tuple = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
lowercase__ : str = ap.parse_args()
lowercase__ : Optional[int] = Path(args.readme_filepath)
lowercase__ : Union[str, Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 351 |
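A quick illustration of the front-matter split performed by _split_yaml_from_readme in the snippet above, using a made-up README string; the logic mirrors the function body line for line.

readme = "---\nlanguage: en\nlicense: mit\n---\n# My dataset\n\nSome body text."

full_content = readme.splitlines()
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
    sep_idx = full_content[1:].index("---") + 1     # index of the closing '---'
    yaml_block = "\n".join(full_content[1:sep_idx])  # 'language: en\nlicense: mit'
    body = "\n".join(full_content[sep_idx + 1 :])    # markdown body only
    print(yaml_block)
    print(body)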
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase__ : List[Any] = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """masked_bert"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1E-12 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Optional[int]="constant" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : str = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : List[Any] = layer_norm_eps
lowerCAmelCase_ : str = pruning_method
lowerCAmelCase_ : Optional[Any] = mask_init
lowerCAmelCase_ : int = mask_scale
| 289 | 0 |
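A hedged usage sketch for the config class above. The class name was obfuscated in the source and is restored here as MaskedBertConfig, the name used in the movement-pruning research project this snippet appears to come from; it assumes the class definition above is in scope.

# Instantiate with the pruning-specific parameters; the rest keep BERT-base defaults.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.model_type, config.pruning_method, config.vocab_size)  # masked_bert topK 30522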