| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""Banker's algorithm: a resource-allocation and deadlock-avoidance
strategy that only executes a process when its remaining needs can be
met by the currently available resources."""
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """Store the total claim vector plus the allocation and
        maximum-claim tables."""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Per-resource total currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: the claim vector minus allocated totals."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Remaining need of each process: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row to its original process index."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, executing every process whose remaining
        need fits into the available resources, then freeing its allocation."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()

        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self) -> None:
        """Print the allocation and maximum-claim tables and the totals."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
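# Usage sketch (an addition, not part of the original module): run the
# safety check on the sample tables above. Any truthy keyword argument
# (the name `describe` is arbitrary) makes main() print the tables first.
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)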
| 321 |
"""Calculate the impedance of a simple series AC circuit from any two of
resistance, reactance and impedance (pass the unknown quantity as 0)."""
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """Apply Z = sqrt(R**2 + X**2) and solve for whichever argument is zero."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
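# Usage sketch (added): solve for whichever quantity is passed as zero.
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}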
| 321 | 1 |
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of `num` must be multiplied
    together before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits of `num` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
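# Usage sketch (added): 39 -> 27 -> 14 -> 4 takes three multiplicative
# steps, while 39 -> 12 -> 3 takes two additive steps.
if __name__ == "__main__":
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2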
| 39 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        # Tokenize the label together with the image for CLIPSeg.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # Threshold the logits into a binary mask and render it as an image.
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
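# Usage sketch (added; the image path is hypothetical): PipelineTool
# instances are callable, so the tool can be driven directly once built.
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(image=Image.open("cat.png"), label="cat")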
| 39 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCamelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = _distribute_shards(**UpperCamelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = _split_gen_kwargs(UpperCamelCase_ , UpperCamelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if expected is RuntimeError:
with pytest.raises(UpperCamelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCamelCase_ )
else:
__SCREAMING_SNAKE_CASE = _number_of_shards_in_gen_kwargs(UpperCamelCase_ )
assert out == expected
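# Usage sketch (added): the helper behind the parametrized case above.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]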
| 100 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    """Build an EfficientNetConfig for the requested variant."""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """Download a test image to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    """Build the image processor matching the variant's input size."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    """Map TF parameter names to their HF state-dict counterparts."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF weights into the HF state dict, transposing as needed."""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/tweak the original EfficientNet weights into the HF structure."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
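# Usage sketch (added; the script filename is an assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model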
| 289 | 0 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Return the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Proxy returned by acquire() so it can be used as a context manager."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
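# Usage sketch (added, not part of the original module): guard a critical
# section with the platform-appropriate FileLock chosen above.
if __name__ == "__main__":
    with FileLock("demo.txt.lock", timeout=5):
        print("lock acquired; do protected work here")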
| 369 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__UpperCamelCase : List[Any] = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BlenderbotSmallTokenizer
def __init__( self : Dict ,lowercase_ : Dict=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : Any="<|endoftext|>" ,lowercase_ : Optional[Any]="<|endoftext|>" ,lowercase_ : Dict="<|endoftext|>" ,lowercase_ : Optional[int]=False ,lowercase_ : Union[str, Any]=True ,**lowercase_ : Union[str, Any] ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ ,merges=lowercase_ ,add_prefix_space=lowercase_ ,trim_offsets=lowercase_ ,) ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ : Tuple = add_prefix_space
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : int=None ):
lowerCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
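# Usage sketch (added, hypothetical): loading the fast tokenizer from the
# checkpoint referenced in the maps above.
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# tokenizer("Sample text")["input_ids"]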
| 74 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool

if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            # Fall back to a reference x-vector from the CMU ARCTIC dataset.
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
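# Usage sketch (added, hypothetical): PipelineTool instances are callable;
# this would download the SpeechT5 checkpoints on first use.
# tool = TextToSpeechTool()
# tool.setup()
# waveform = tool("Hello world")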
| 98 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
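# Usage sketch (added): thanks to the _LazyModule indirection above, the
# heavy torch/TF imports only happen when a symbol is first accessed, e.g.
# from transformers.models.vit_mae import ViTMAEModel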
| 98 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/tweak the fairseq BART weights into the HF structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
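# Usage sketch (added; the script filename is an assumption):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large ./bart-large-pt --hf_config facebook/bart-large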
| 356 |
from __future__ import annotations
class Matrix:
    """Matrix object generated from a 2D array where each element is an
    array representing a row."""

    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
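# Usage sketch (added): determinant, column view, and matrix product.
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # -2
    print(m.columns())      # [[1, 3], [2, 4]]
    print((m * m).rows)     # [[7, 10], [15, 22]]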
| 148 | 0 |
"""simple docstring"""
from PIL import Image
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Image:
def brightness(__lowerCamelCase ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
lowerCAmelCase_ = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 16 |
"""Spectrogram diffusion pipeline: encodes note tokens, denoises mel
spectrogram chunks with a T5-film decoder, and renders audio with MelGAN."""
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
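# Usage sketch (added; the checkpoint name is an assumption): the pipeline
# is called with a list of note-token chunks and returns rendered audio.
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# audio = pipe(input_tokens).audios[0]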
| 321 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowercase = ['''gpt2''']
__lowercase = '''gpt2'''
if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 356
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 105
| 0
|
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
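# Quick numeric check (degree 1, control points (1, 2) and (3, 5)): at t = 0.5 the
# two basis functions are [0.5, 0.5], so the curve point is the midpoint:
#   curve = BezierCurve([(1, 2), (3, 5)])
#   curve.basis_function(0.5)        # -> [0.5, 0.5]
#   curve.bezier_curve_function(0.5) # -> (2.0, 3.5)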
| 26
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
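# Illustrative use: verify that the installed tokenizers package satisfies the
# range pinned in dependency_versions_table.py, raising with the optional hint
# message if it does not.
#   dep_version_check("tokenizers", hint="pip install -U tokenizers")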
| 213
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 168
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
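# Worked example: with number = 0b1101 (13),
#   set_bit(13, 1)    -> 0b1111 == 15
#   clear_bit(13, 2)  -> 0b1001 == 9
#   flip_bit(13, 0)   -> 0b1100 == 12
#   is_bit_set(13, 3) -> True
#   get_bit(13, 1)    -> 0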
| 168
| 1
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
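# Cache layout (inferred from how it is used below, not documented in the original):
# memo maps digitsum(b) -> {c: [(diff, dn, k), ...]} for terms written as
# a(i) = b * 10**k + c, where each cached "jump" records the total growth (diff)
# and the number of sequence terms skipped (dn) at level k.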
def next_term(a_i, k, i, n):
    # ds_b: digit sum of the high-order part b; c: the low-order part in base 10
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped

        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
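# The sequence being accelerated is a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)):
# 1, 2, 4, 8, 16, 23, 28, 38, 49, ...  A naive reference implementation, added
# here only as a cross-check for small n (it is far too slow for n = 10**15):
def _naive_solution(n: int) -> int:
    a_n = 1
    for _ in range(n - 1):
        a_n += sum(int(digit) for digit in str(a_n))
    return a_n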
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        expected_encoding = a  # the expected encoding dict kept verbatim above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 107
| 1
|
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
# fmt: off
A__ = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = A__  # the expected encoding dict kept verbatim above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 123
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
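# Example: rename_key({"head.weight": w}, "head.weight", "classifier.weight")
# leaves the dict as {"classifier.weight": w}.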
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
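# Example invocation (the script filename is illustrative; the flags are the
# argparse options defined above):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub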
| 123
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 172
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 140
| 0
|
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
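# The loop walks the continued-fraction expansions of sqrt(2): 3/2, 7/5, 17/12,
# 41/29, ... and counts the expansions whose numerator has more digits than the
# denominator. For the default n = 1000 the published answer is 153.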
if __name__ == "__main__":
print(F"""{solution() = }""")
| 370
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _A ( __magic_name__ ):
lowercase__ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase__ = full_content[1:].index("---" ) + 1
lowercase__ = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__magic_name__ )
class lowerCAmelCase ( lowercase_ ):
# class attributes
__lowerCamelCase = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase ( cls :Dict , _lowercase :Path ):
'''simple docstring'''
with open(_lowercase , encoding="utf-8" ) as readme_file:
lowercase__ , lowercase__ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_lowercase )
else:
return cls()
def UpperCAmelCase ( self :Any , _lowercase :Path ):
'''simple docstring'''
if path.exists():
with open(_lowercase , encoding="utf-8" ) as readme_file:
lowercase__ = readme_file.read()
else:
lowercase__ = None
lowercase__ = self._to_readme(_lowercase )
with open(_lowercase , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(_lowercase )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[str] = None ):
'''simple docstring'''
if readme_content is not None:
lowercase__ , lowercase__ = _split_yaml_from_readme(_lowercase )
lowercase__ = "---\n" + self.to_yaml_string() + "---\n" + content
else:
lowercase__ = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = yaml.load(_lowercase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase__ = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_lowercase )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_lowercase , allow_unicode=_lowercase , encoding="utf-8" , ).decode("utf-8" )
_snake_case = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_snake_case = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_snake_case = ap.parse_args()
_snake_case = Path(args.readme_filepath)
_snake_case = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
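    # Illustrative round trip (hypothetical README content, not from this script):
    # _split_yaml_from_readme("---\ntrain-eval-index: []\n---\n# My dataset")
    # returns ("train-eval-index: []", "# My dataset").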
| 201
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 110
|
def solution(length=50):
    """Counts the ways a row of `length` units can be filled with grey unit
    squares and coloured tiles of length two, three, or four, keyed by where
    the first coloured tile starts (cf. Project Euler 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"{solution() = }")
| 82
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "red cat, 4k photo"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n        >>> pipe.to("cuda")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save("cat.png")\n        ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
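# For example, get_new_h_w(768, 768) returns (96, 96): 768 // 8**2 == 12 with no
# remainder, and 12 * 8 == 96 latent units per side; non-multiples round up.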
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 369
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
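# Hedged usage sketch (the checkpoint id and image path are illustrative, not
# taken from this file):
#
# from PIL import Image
# from transformers import CLIPProcessor
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor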
| 78
| 0
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 223
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
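# Quick hedged checks of the normalization-based metrics above (the strings are
# illustrative only): case, punctuation and articles are stripped before scoring.
#
# normalize_answer("The cat sat.")               # -> "cat sat"
# f1_score("The cat sat.", "a cat sat")          # -> 1.0 (identical token bags)
# exact_match_score("The cat sat.", "a cat sat") # -> True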
| 223
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
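# Hedged usage sketch: the defaults above correspond to the bert-base geometry;
# nothing below comes from this file.
#
# config = BertConfig()
# print(config.num_attention_heads)  # 12
# onnx_inputs = BertOnnxConfig(config).inputs  # input_ids / attention_mask / token_type_ids axes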
| 357
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 232
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
    def test_primes(self):
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(1_1))
self.assertTrue(is_prime(1_3))
self.assertTrue(is_prime(1_7))
self.assertTrue(is_prime(1_9))
self.assertTrue(is_prime(2_3))
self.assertTrue(is_prime(2_9))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def prepare_seq2seq_batch(
    self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
) -> BatchEncoding:
    self.src_lang = src_lang
    self.tgt_lang = tgt_lang
    return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
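

# Illustrative sketch (assumes the pretrained files referenced above are reachable;
# the fast tokenizer class name below is the readable one this dump obfuscated):
# MBart puts the language code *after* the tokens on the source side, i.e.
# `X </s> src_lang_code`, which is exactly what set_src_lang_special_tokens encodes.
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
# ids = tok("Hello")["input_ids"]
# ids[-2:] == [tok.eos_token_id, tok.convert_tokens_to_ids("en_XX")]  # expected layout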
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer."""

    def __init__(self, n) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
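

# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    q = CircularQueue(3)
    q.enqueue(1).enqueue(2).enqueue(3)   # enqueue returns self, so calls chain
    assert len(q) == 3
    assert q.dequeue() == 1              # front advances modulo n
    q.enqueue(4)                         # rear has wrapped around to index 0
    assert [q.dequeue() for _ in range(3)] == [2, 3, 4]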
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"
def __init__( self ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-12 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=224 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : Union[str, Any] = num_stages
lowerCAmelCase__ : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
lowerCAmelCase__ : str = [3, 3, 9, 3] if depths is None else depths
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : Dict = drop_path_rate
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : int = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
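

# Quick sketch (defaults read off the constructor above): a no-argument config yields
# the "tiny" layout, e.g. hidden_sizes [96, 192, 384, 768] and depths [3, 3, 9, 3],
# with stage_names ["stem", "stage1", ..., "stage4"] driving the backbone indexing.
# config = ConvNextV2Config()
# config.depths  # -> [3, 3, 9, 3]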
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
@register_to_config
def __init__( self , __UpperCAmelCase = 128 , __UpperCAmelCase = 256 , __UpperCAmelCase = 2_000.0 , __UpperCAmelCase = 768 , __UpperCAmelCase = 12 , __UpperCAmelCase = 12 , __UpperCAmelCase = 64 , __UpperCAmelCase = 2048 , __UpperCAmelCase = 0.1 , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.Sequential(
nn.Linear(__UpperCAmelCase , d_model * 4 , bias=__UpperCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__UpperCAmelCase ) , nn.SiLU() , )
__lowerCamelCase = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = False
__lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(p=__UpperCAmelCase )
__lowerCamelCase = nn.ModuleList()
for lyr_num in range(__UpperCAmelCase ):
# FiLM conditional T5 decoder
__lowerCamelCase = DecoderLayer(d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase )
self.decoders.append(__UpperCAmelCase )
__lowerCamelCase = TaLayerNorm(__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(p=__UpperCAmelCase )
__lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
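# Shape sketch for the mask construction above (an illustrative reading): given
# query_input (batch, q_len) and key_input (batch, k_len), multiplying the
# unsqueezed tensors yields (batch, q_len, k_len); unsqueeze(-3) then adds a
# broadcastable heads axis, producing a (batch, 1, q_len, k_len) attention mask.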
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
batch, _, _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase = self.conditioning_emb(__UpperCAmelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase = torch.broadcast_to(
torch.arange(__UpperCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase = self.position_encoding(__UpperCAmelCase )
__lowerCamelCase = self.continuous_inputs_projection(__UpperCAmelCase )
inputs += position_encodings
__lowerCamelCase = self.dropout(__UpperCAmelCase )
# decoder: No padding present.
__lowerCamelCase = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase = [(x, self.encoder_decoder_mask(__UpperCAmelCase , __UpperCAmelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase = lyr(
__UpperCAmelCase , conditioning_emb=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )[0]
__lowerCamelCase = self.decoder_norm(__UpperCAmelCase )
__lowerCamelCase = self.post_dropout(__UpperCAmelCase )
__lowerCamelCase = self.spec_out(__UpperCAmelCase )
return spec_out
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1E-6 ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , dropout_rate=__UpperCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , dropout_rate=__UpperCAmelCase , layer_norm_epsilon=__UpperCAmelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase , layer_norm_epsilon=__UpperCAmelCase ) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = self.layer[0](
__UpperCAmelCase , conditioning_emb=__UpperCAmelCase , attention_mask=__UpperCAmelCase , )
if encoder_hidden_states is not None:
__lowerCamelCase = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase = self.layer[1](
__UpperCAmelCase , key_value_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase = self.layer[-1](__UpperCAmelCase , __UpperCAmelCase )
return (hidden_states,)
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = TaLayerNorm(__UpperCAmelCase )
__lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCAmelCase )
__lowerCamelCase = Attention(query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , out_bias=__UpperCAmelCase , scale_qk=__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
# pre_self_attention_layer_norm
__lowerCamelCase = self.layer_norm(__UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase = self.FiLMLayer(__UpperCAmelCase , __UpperCAmelCase )
# Self-attention block
__lowerCamelCase = self.attention(__UpperCAmelCase )
__lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = Attention(query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , out_bias=__UpperCAmelCase , scale_qk=__UpperCAmelCase )
__lowerCamelCase = TaLayerNorm(__UpperCAmelCase , eps=__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = self.layer_norm(__UpperCAmelCase )
__lowerCamelCase = self.attention(
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase )
return layer_output
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = TaDenseGatedActDense(d_model=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase )
__lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCAmelCase )
__lowerCamelCase = TaLayerNorm(__UpperCAmelCase , eps=__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = self.layer_norm(__UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase = self.film(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = self.DenseReluDense(__UpperCAmelCase )
__lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase )
return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))   # gate branch
        hidden_linear = self.wi_1(hidden_states)           # linear branch
        hidden_states = hidden_gelu * hidden_linear        # gated activation
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
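

# Equivalent one-liner sketch of the RMS norm above (fp32 accumulation assumed):
# y = weight * x * rsqrt(mean(x.float() ** 2, dim=-1, keepdim=True) + eps),
# with y cast back to the weight dtype when running in half precision.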
class NewGELUActivation(nn.Module):
    """Gaussian Error Linear Unit, tanh approximation."""

    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: project the conditioning embedding to a per-channel (scale, shift)."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
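

# Tiny numeric sketch of the FiLM modulation above (illustrative, not part of the
# module): with scale = 0 and shift = 0 the layer is the identity, so the
# conditioning embedding learns per-channel affine nudges around identity.
# film = TaFiLMLayer(in_features=8, out_features=4)
# film(torch.zeros(1, 1, 4), torch.zeros(1, 1, 8)).shape  # -> torch.Size([1, 1, 4])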
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
'''simple docstring'''
config = self.get_config()
config.vocab_size = 300
return config
def lowerCamelCase ( self ):
'''simple docstring'''
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
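# Shape sketch for the multiple-choice reshape above: (batch, seq) --unsqueeze(1)-->
# (batch, 1, seq) --expand--> (batch, num_choices, seq); .contiguous() materialises
# the expanded view so the model's flattened (batch * num_choices, seq) pass is valid.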
def lowerCamelCase ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
    config_and_inputs[0].position_embedding_type = type
    self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__lowerCamelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__A : Union[str, Any] = get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE = None )-> List[Any]:
lowerCamelCase_ =(
os.path.join(_SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase_ =Extractor
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowerCamelCase_ =os.path.abspath(_SCREAMING_SNAKE_CASE )
return os.path.join(self.extract_dir , hash_url_to_filename(_SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> bool:
return force_extract or (
not os.path.isfile(_SCREAMING_SNAKE_CASE ) and not (os.path.isdir(_SCREAMING_SNAKE_CASE ) and os.listdir(_SCREAMING_SNAKE_CASE ))
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> str:
lowerCamelCase_ =self.extractor.infer_extractor_format(_SCREAMING_SNAKE_CASE )
if not extractor_format:
return input_path
lowerCamelCase_ =self._get_output_path(_SCREAMING_SNAKE_CASE )
if self._do_extract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.extractor.extract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return output_path
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
@classmethod
@abstractmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> bool:
...
@staticmethod
@abstractmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
...
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__):
_UpperCamelCase:List[bytes] = []
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f:
return f.read(_SCREAMING_SNAKE_CASE )
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = b"" )-> bool:
if not magic_number:
lowerCamelCase_ =max(len(_SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase_ =cls.read_magic_number(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except OSError:
return False
return any(magic_number.startswith(_SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
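# Sniffing sketch for the classmethod above: when no magic_number is passed, it
# reads max(len(m) for m in cls.magic_numbers) bytes from the file head and accepts
# the file if any registered prefix matches, e.g. b"\x1f\x8b" for gzip streams.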
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> bool:
return tarfile.is_tarfile(_SCREAMING_SNAKE_CASE )
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> str:
def resolved(_SCREAMING_SNAKE_CASE ) -> str:
return os.path.realpath(os.path.abspath(_SCREAMING_SNAKE_CASE ) )
def badpath(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ).startswith(_SCREAMING_SNAKE_CASE )
def badlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase_ =resolved(os.path.join(_SCREAMING_SNAKE_CASE , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =resolved(_SCREAMING_SNAKE_CASE )
for finfo in members:
if badpath(finfo.name , _SCREAMING_SNAKE_CASE ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
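# Illustrative example of what the filter above blocks: a tar member named
# "../../etc/passwd" resolves outside `base`, so badpath() is True and the entry is
# logged and skipped instead of being extracted (the classic path-traversal defence).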
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tarfile.open(_SCREAMING_SNAKE_CASE )
tar_file.extractall(_SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
tar_file.close()
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[int] = [b"\x1F\x8B"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
with gzip.open(_SCREAMING_SNAKE_CASE , """rb""" ) as gzip_file:
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = b"" )-> bool:
if super().is_extractable(_SCREAMING_SNAKE_CASE , magic_number=_SCREAMING_SNAKE_CASE ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as fp:
lowerCamelCase_ =_EndRecData(_SCREAMING_SNAKE_CASE )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir)  # CD is where we expect it to be
if len(data) == sizeCentralDir:
    centdir = struct.unpack(structCentralDir, data)  # CD is the right size
    if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """r""" ) as zip_file:
zip_file.extractall(_SCREAMING_SNAKE_CASE )
zip_file.close()
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
with lzma.open(_SCREAMING_SNAKE_CASE ) as compressed_file:
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Dict = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =rarfile.RarFile(_SCREAMING_SNAKE_CASE )
rf.extractall(_SCREAMING_SNAKE_CASE )
rf.close()
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase_ =zstd.ZstdDecompressor()
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as ifh, open(_SCREAMING_SNAKE_CASE , """wb""" ) as ofh:
dctx.copy_stream(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = [b"\x42\x5A\x68"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
with bz2.open(_SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:int = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with py7zr.SevenZipFile(_SCREAMING_SNAKE_CASE , """r""" ) as archive:
archive.extractall(_SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Any = [b"\x04\x22\x4D\x18"]
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(_SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE :
# Keep the zip extractor last: files can be wrongly detected as zip when they are actually tar or gzip.
_UpperCamelCase:Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _snake_case ( cls )-> int:
return max(
len(_SCREAMING_SNAKE_CASE )
for extractor in cls.extractors.values()
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
try:
return MagicNumberBaseExtractor.read_magic_number(_SCREAMING_SNAKE_CASE , magic_number_length=_SCREAMING_SNAKE_CASE )
except OSError:
return b""
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =cls.infer_extractor_format(_SCREAMING_SNAKE_CASE )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE )-> str: # <Added version="2.4.0"/>
lowerCamelCase_ =cls._get_magic_number_max_length()
lowerCamelCase_ =cls._read_magic_number(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_SCREAMING_SNAKE_CASE , magic_number=_SCREAMING_SNAKE_CASE ):
return extractor_format
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "deprecated" , )-> None:
os.makedirs(os.path.dirname(_SCREAMING_SNAKE_CASE ) , exist_ok=_SCREAMING_SNAKE_CASE )
# Prevent parallel extractions
lowerCamelCase_ =str(Path(_SCREAMING_SNAKE_CASE ).with_suffix(""".lock""" ) )
with FileLock(_SCREAMING_SNAKE_CASE ):
shutil.rmtree(_SCREAMING_SNAKE_CASE , ignore_errors=_SCREAMING_SNAKE_CASE )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase_ =cls.extractors[extractor_format]
return extractor.extract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=_SCREAMING_SNAKE_CASE , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_SCREAMING_SNAKE_CASE ):
return extractor.extract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
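

# End-to-end sketch (paths are hypothetical; "Extractor" stands for the registry
# class defined above, whose identifier was mangled in this dump):
# fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")   # -> "gzip"
# Extractor.extract("/tmp/archive.tar.gz", "/tmp/out", extractor_format=fmt)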
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` using backtracking."""
    create_state_space_tree(sequence, [], 0, [False for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[bool],
) -> None:
    """Recursively fix one position per level, then undo the choice (backtrack)."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()     # backtrack: undo the append...
            index_used[i] = False      # ...and free the element for other branches


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
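

# Complexity note (illustrative, not in the original): the state-space tree has n!
# leaves, so [3, 1, 2, 4] prints 24 permutations and ["A", "B", "C"] prints 6.
# To collect instead of print, swap print(current_sequence) for
# results.append(list(current_sequence)); then len(results) == math.factorial(len(sequence)).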
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple=1_3 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[Any]=9_9 , _lowerCAmelCase : Optional[int]=3_2 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : int=3_7 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=5_1_2 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=0 , ) -> List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = projection_dim
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
snake_case_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int ) -> Any:
"""simple docstring"""
snake_case_ = TFDPRContextEncoder(config=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ) -> int:
"""simple docstring"""
snake_case_ = TFDPRQuestionEncoder(config=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ) -> int:
"""simple docstring"""
snake_case_ = TFDPRReader(config=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ = TFDPRModelTester(self )
snake_case_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def lowerCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRContextEncoder.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRQuestionEncoder.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDPRReader.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
snake_case_ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
snake_case_ = model(_lowerCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
snake_case_ = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 159
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.")
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args
def image_grid(imgs: list, rows: int, cols: int) -> Image.Image:
    '''simple docstring'''
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    w, h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
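# A minimal usage sketch (hypothetical tiles, not part of the original script):
# four 64x64 images pasted into a 2x2 grid give one 128x128 sheet.
#
#   tiles = [Image.new("RGB", (64, 64), color=c) for c in ("red", "green", "blue", "white")]
#   sheet = image_grid(tiles, rows=2, cols=2)
#   assert sheet.size == (128, 128)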
def generate_images(pipeline, prompt: str = "robotic cat with wings", guidance_scale: float = 7.5, num_inference_steps: int = 50, num_images_per_prompt: int = 1, seed: int = 42) -> tuple:
    '''simple docstring'''
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 159
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 334
| 0
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 18
|
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
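# A small sanity check (hand-worked, not from the original file): the binary
# search narrows the insertion point, then the shift loop makes room for it.
#
#   binary_insertion_sort([5, 2, 4, 1])  # -> [1, 2, 4, 5]
#   binary_insertion_sort([])            # -> []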
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 18
| 1
|
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    '''simple docstring'''
    print("moving disk from", fp, "to", tp)


def main() -> None:
    '''simple docstring'''
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
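# For height 2, the recursion above prints the classic three-move solution
# (worked through by hand, not taken from the original script):
#
#   move_tower(2, "A", "B", "C")
#   # moving disk from A to C
#   # moving disk from A to B
#   # moving disk from C to B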
if __name__ == "__main__":
main()
| 354
|
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return (2 / (1 + np.exp(-2 * vector))) - 1
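# A quick numeric check (values rounded; tanh(0) = 0 and tanh(1) ≈ 0.7616):
#
#   tangent_hyperbolic(np.array([0.0, 1.0]))  # -> array([0., 0.76159416])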
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 129
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs) -> Optional[Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> str:
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self) -> Any:
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
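# A minimal usage sketch (the checkpoint name below is illustrative, not taken
# from this file): the processor combines image patches with tokenized text.
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A picture of", return_tensors="pt")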
| 129
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
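    # Worked example: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the
    # property above multiplies the strides, i.e. 5 * 2**6 = 320 input samples
    # per output frame.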
| 237
|
'''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
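# Two quick hand-checked examples: matched pairs empty the stack, while a
# mismatched closer (or a non-empty stack at the end) means "unbalanced".
#
#   is_balanced("([])")  # -> True
#   is_balanced("(]")    # -> False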
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 237
| 1
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
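# A worked example: a sliding window of size 3 over "abcde" yields three n-grams.
#
#   create_ngram("abcde", 3)  # -> ['abc', 'bcd', 'cde']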
if __name__ == "__main__":
from doctest import testmod
testmod()
| 42
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 42
| 1
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
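# A quick worked example: punctuation in the ignore list is stripped and the
# text is lowercased before whitespace sequences are collapsed.
#
#   normalize_text("Hello, World!")  # -> "hello world"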
def main(args):
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
| 218
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """simple docstring"""

    def __init__(self, p_stop=0.01, max_length=1000) -> None:
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    """simple docstring"""

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True) -> None:
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])

        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self) -> None:
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self) -> None:
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self) -> None:
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self) -> None:
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self) -> None:
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False) -> None:
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self) -> None:
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self) -> None:
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self) -> None:
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self) -> None:
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self) -> None:
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self) -> None:
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 218
| 1
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
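# Worked example: for n = 10 the sum is 55 and the sum of squares is 385,
# so the answer is 55**2 - 385 = 2640.
#
#   solution(10)  # -> 2640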
if __name__ == "__main__":
print(f"""{solution() = }""")
| 237
|
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
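# A minimal usage sketch (the coefficients below are illustrative only, not a
# designed filter): a 2nd-order filter needs order + 1 coefficients per side.
#
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -1.8, 0.81], [0.01, 0.02, 0.01])
#   smoothed = [filt.process(x) for x in (0.0, 1.0, 1.0, 1.0)]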
| 237
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ) -> None:
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self) -> dict:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        '''simple docstring'''
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self) -> dict:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self) -> None:
        '''simple docstring'''
        pass
    def test_call_pil(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
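# --- Added sketch (not part of the original test file) ---
# A hedged sketch of the transform these tests pin down: resize so the shorter
# side hits `shortest_edge`, then center-crop to `crop_size`. The 20/18 sizes
# mirror the tester defaults asserted above; the input image is synthetic.
import numpy as np
from PIL import Image

image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
scale = 20 / min(image.size)
resized = image.resize((round(image.width * scale), round(image.height * scale)))
left, top = (resized.width - 18) // 2, (resized.height - 18) // 2
cropped = resized.crop((left, top, left + 18, top + 18))
assert cropped.size == (18, 18)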
| 356
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int (the original `/=` produced a float)
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230
| 0
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    '''simple docstring'''

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
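# A hedged usage sketch: dataclass argument containers like BenchmarkArguments
# (as renamed above) are typically consumed through transformers'
# HfArgumentParser, which turns each field into a CLI flag,
# e.g. `python benchmark.py --models bert-base-cased --batch_sizes 8 16`.
from transformers import HfArgumentParser

parser = HfArgumentParser(BenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses()[0]
print(benchmark_args.to_json_string())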
| 113
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
__UpperCamelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__UpperCamelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__UpperCamelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__UpperCamelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__UpperCamelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__UpperCamelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__UpperCamelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__UpperCamelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__UpperCamelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
__UpperCamelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__UpperCamelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__UpperCamelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__UpperCamelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__UpperCamelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__UpperCamelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
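# A minimal sketch of the regression pattern this script applies: seed all RNGs,
# run one deterministic forward pass, and compare a small output slice against a
# stored reference tensor with a loose tolerance. Values here are placeholders.
import torch

torch.manual_seed(0)
output = torch.randn(1, 3, 32, 32)             # stands in for model(noise, t).sample
expected_slice = output[0, 0, 0, :30].clone()  # recorded once, checked on every run
assert torch.allclose(output[0, 0, 0, :30], expected_slice, atol=1e-3)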
| 113
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def setUp(self) -> None:
    self.tmpdirname = tempfile.mkdtemp()

    image_processor = BlipImageProcessor()
    tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    processor = BlipProcessor(image_processor, tokenizer)
    processor.save_pretrained(self.tmpdirname)

def get_tokenizer(self, **kwargs):
    return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

def get_image_processor(self, **kwargs):
    return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

def tearDown(self) -> None:
    shutil.rmtree(self.tmpdirname)

def prepare_image_inputs(self):
    """Creates a list of random PIL images."""
    image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
    image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
    return image_inputs

def test_save_load_pretrained_additional_features(self):
    processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    processor.save_pretrained(self.tmpdirname)

    tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
    image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

    processor = BlipProcessor.from_pretrained(
        self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
    )

    self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

    self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
    self.assertIsInstance(processor.image_processor, BlipImageProcessor)

def test_image_processor(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()

    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

    image_input = self.prepare_image_inputs()

    input_feat_extract = image_processor(image_input, return_tensors="np")
    input_processor = processor(images=image_input, return_tensors="np")

    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

def test_tokenizer(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()

    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"

    encoded_processor = processor(text=input_str)

    encoded_tok = tokenizer(input_str, return_token_type_ids=False)

    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])

def test_processor(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()

    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"
    image_input = self.prepare_image_inputs()

    inputs = processor(text=input_str, images=image_input)

    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

    # test if it raises when no input is passed
    with pytest.raises(ValueError):
        processor()

def test_tokenizer_decode(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()

    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)

    self.assertListEqual(decoded_tok, decoded_processor)

def test_model_input_names(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()

    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

    input_str = "lower newer"
    image_input = self.prepare_image_inputs()

    inputs = processor(text=input_str, images=image_input)

    # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
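# A hedged end-to-end sketch of what these tests exercise: a single processor
# call produces both tokenizer and image-processor outputs. The checkpoint id
# is a public BLIP checkpoint; the image path is a placeholder.
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
inputs = processor(text="lower newer", images=Image.open("photo.jpg"), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']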
| 366
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    '''simple docstring'''
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191
| 0
|
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    """simple docstring"""

    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
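# A hedged alternative sketch: OpenCV ships a vectorized Harris detector that
# replaces the hand-rolled double loop above. blockSize/ksize and the threshold
# are illustrative choices, and the image path is a placeholder.
import cv2
import numpy as np

gray = cv2.imread("path_to_image", cv2.IMREAD_GRAYSCALE).astype(np.float32)
response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
corners = np.argwhere(response > 0.01 * response.max())  # (y, x) coordinates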
| 81
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    '''simple docstring'''

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 96
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_llama"] = [
    "LlamaForCausalLM",
    "LlamaModel",
    "LlamaPreTrainedModel",
    "LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
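# A minimal sketch of the mechanism _LazyModule relies on: a module-level
# __getattr__ (PEP 562) that imports a submodule only when one of its exported
# names is first accessed. Assumes this file lives inside a package.
import importlib

_IMPORT_STRUCTURE = {"tokenization_llama": ["LlamaTokenizer"]}

def __getattr__(name):
    for module_name, exported_names in _IMPORT_STRUCTURE.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")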
| 361
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint: dict, __a: dict):
    vae_state_dict = checkpoint
    new_checkpoint = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 22
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
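# A hedged usage sketch: the same captioning model is reachable through the
# generic image-to-text pipeline, without the Tool wrapper. The image path is
# a placeholder.
from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner(Image.open("photo.jpg"))[0]["generated_text"])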
| 89
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
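# A vectorized alternative sketch to the linear scan in similarity_search,
# assuming 2-D float arrays as in the function above.
import numpy as np

def nearest_neighbors(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # pairwise distance matrix of shape (n_queries, n_points)
    dists = np.linalg.norm(value_array[:, None, :] - dataset[None, :, :], axis=-1)
    idx = dists.argmin(axis=1)
    return [[dataset[i].tolist(), float(dists[q, i])] for q, i in enumerate(idx)]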
| 349
| 0
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
A_ : Tuple = {"""input_ids""": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 135
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
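# A short usage sketch mirroring the first integration test above: classify a
# single image with the same public checkpoint. The image path is a placeholder.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])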
| 135
| 1
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """simple docstring"""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
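# A short usage sketch of the composite-config pattern defined above: sub-configs
# serialize under their own keys when the parent is exported.
text_cfg = AlignTextConfig(vocab_size=30522)
vision_cfg = AlignVisionConfig(image_size=600)
cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=640)
exported = cfg.to_dict()
assert "text_config" in exported and "vision_config" in exported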
| 61
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__a : List[str] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list element; repeatedly swaps values with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort a list using one process per element communicating over pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*lowercase )
__lowercase = odd_even_transposition(lowercase )
print('''Sorted List\n''' )
print(*lowercase )
if __name__ == "__main__":
main()
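# A quick sanity check one could run in place of main() -- a sketch only
# (the worker hardcodes 10 swap rounds, so keep the input short):
#
#   data = [3, 1, 4, 1, 5, 9, 2, 6]
#   assert odd_even_transposition(list(data)) == sorted(data)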
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a_ = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    # Member names reconstructed from the diffusers Flax scheduler family;
    # the obfuscated source only preserved the enum values.
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's `step` function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common scheduler functionality."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a pretrained scheduler config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Save the scheduler configuration to a directory."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Return all schedulers that are compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule that discretizes the given cosine alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
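# A minimal usage sketch of the cosine schedule above (values illustrative):
#
#   >>> betas = betas_for_alpha_bar(1000)
#   >>> betas.shape
#   (1000,)
#   >>> bool((betas <= 0.999).all())
#   True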
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
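# A hedged usage sketch of the forward-noising helper above. The state is
# built by hand here for illustration; real code would obtain it from
# CommonSchedulerState.create(scheduler):
#
#   betas = jnp.linspace(1e-4, 0.02, 1000)
#   alphas = 1.0 - betas
#   state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
#   x0 = jnp.zeros((2, 3, 8, 8))
#   eps = jnp.ones_like(x0)
#   noisy = add_noise_common(state, x0, eps, jnp.array([10, 500]))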
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
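# To exercise the local-metric doctests above one would typically run pytest
# against this module; the path below is a guess at the repository layout:
#
#   python -m pytest tests/test_metric_common.py -k "bleu"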
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
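# The integration test above only runs when slow tests are opted in, per the
# standard transformers convention (the file path here is a guess):
#
#   RUN_SLOW=1 python -m pytest tests/models/regnet/test_modeling_flax_regnet.py -k integration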
'''simple docstring'''
fast27_timesteps = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
smart185_timesteps = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
super27_timesteps = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
super40_timesteps = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
super100_timesteps = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
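# The eight lists above appear to be the precomputed denoising-timestep
# schedules shipped with diffusers' DeepFloyd-IF pipeline; the variable names
# were reconstructed from the list lengths (27/27/50/100/185/27/40/100). A
# hedged usage sketch (model id and call signature are assumptions):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   images = pipe(prompt="a photo of an astronaut", timesteps=fast27_timesteps).images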
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
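# Sanity-check sketch: the published Project Euler 97 answer is quoted from
# memory here, so verify it before relying on it:
#
#   >>> solution(10)
#   '8739992577'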
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
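# The _LazyModule pattern above defers the heavy torch/flax/tf imports until
# an attribute is actually accessed, e.g. (sketch):
#
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderProcessor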
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
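# A hedged invocation sketch. Only --task_type, --max_seq_length, --labels,
# --gpus and --overwrite_cache are defined above; the other flags come from
# add_generic_args/BaseTransformer and are assumptions, as are the paths:
#
#   python run_ner.py --data_dir ./data --model_name_or_path bert-base-cased \
#       --output_dir ./out --task_type NER --max_seq_length 128 --do_predict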
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Prepare the plaintext: keep letters, upper-case them, split doubled
    letters with X, and pad to an even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
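# Behaviour sketch (doctest-style; the value was worked out by hand from the
# rules above):
#
#   >>> prepare_input("Hide the gold")
#   'HIDETHEGOLDX'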
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
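# Round-trip sketch (key and message are arbitrary examples; decoding returns
# the prepared input, with the padding X's still in place):
#
#   >>> decode(encode("Hello World", "playfair"), "playfair")
#   'HELXLOWORLDX'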
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
snake_case_ = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
snake_case_ = BertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
lowerCAmelCase_ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
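# Illustrative invocation (hypothetical paths; the file names below are
# assumptions, not part of this script):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_bert/bert_model.ckpt \
#       --bert_config_file ./tf2_bert/bert_config.json \
#       --pytorch_dump_path ./bert_pytorch_model.bin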
| 8
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
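# A minimal sketch (illustrative only, not part of the test suite) of the BPE
# merge process that the toy vocab/merges above encode: "lower" is split into
# characters with an end-of-word marker, then merges are applied in order.
def _toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols
# _toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]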
| 310
| 0
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and the columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in the array via binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search; the search bound shrinks row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
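# Worked example (illustrative) for count_negatives_binary_search: on the grid
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]] the per-row
# bound shrinks 3 -> 3 -> 2 -> 0, i.e. 8 non-negative values in total, so the
# function returns 4 * 4 - 8 = 8 negatives.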
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives with a flat brute-force scan of every value."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three counting implementations against each other."""
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 367
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split the dataset bunch into the feature matrix and the target vector."""
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features and target."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
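# Sanity note (illustrative, based on scikit-learn's bundled Iris data):
# load_iris() provides 150 samples with 4 features and 3 classes, so the 0.25
# test split above fits the classifier on ~112 rows and evaluates on ~38.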
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 34
| 0
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
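# Parsing sketch (illustrative input, assuming a tokenizer whose added vocab
# does not contain the leaves): token2json on
# "<s_menu><s_name> burger</s_name><s_cnt> 2</s_cnt></s_menu>"
# yields {"menu": {"name": "burger", "cnt": "2"}}.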
| 17
|
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of 'function' from 'starting_point' onwards by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
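# Example (illustrative): approximating sqrt(2) as the positive root of
# x**2 - 2, starting near 1.5:
#   newton_raphson("x**2 - 2", 1.5)  # ~= 1.4142135623730951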
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 190
| 0
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/NNNNNNNNNN' style olid, return the book data from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a selection of it in human-readable form."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
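# Example (illustrative; performs a live HTTP request against openlibrary.org):
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   book["Title"]  # -> "Matilda" for this Roald Dahl ISBN, per Open Library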
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__A : Any = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__A : Any = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print('\n'.join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 363
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 8
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data; property names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        raise NotImplementedError

    @staticmethod
    def get_labels(path):
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Loads a list of examples into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
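# Labeling sketch (illustrative): for words ["New", "York"] tokenized into word
# pieces ["New", "Yo", "##rk"], convert_examples_to_features keeps the real
# label id only on the first piece of each word; the remaining pieces receive
# pad_token_label_id (-100 by default) so the loss ignores them.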
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
    # Use cross entropy ignore_index as padding label id so that only
    # real label ids contribute to the loss later.

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)
def __len__( self):
"""simple docstring"""
return len(self.features)
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    features: List[InputFeatures]
    pad_token_label_id: int = -100
    # Use cross entropy ignore_index as padding label id so that only
    # real label ids contribute to the loss later.

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None]), """attention_mask""": tf.TensorShape([None])},
tf.TensorShape([None]),
) , )
else:
lowerCAmelCase = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None]),
"""attention_mask""": tf.TensorShape([None]),
"""token_type_ids""": tf.TensorShape([None]),
},
tf.TensorShape([None]),
) , )
    def get_dataset(self):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset
def __len__( self):
"""simple docstring"""
return len(self.features)
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
| 272
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
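    # Note: _LazyModule defers importing the heavy torch-backed modules until an
    # attribute of this package is first accessed (a standard transformers
    # pattern); see transformers.utils._LazyModule for details.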
| 272
| 1
|
def power(base: int, exponent: int) -> float:
    """Raise base to the power of exponent using recursion."""
    return base * power(base, (exponent - 1)) if exponent else 1
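# Examples (illustrative): power(3, 4) == 81 and power(2, 0) == 1. Negative
# exponents are handled in the __main__ block below via 1 / power(base, -n).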
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 350
|
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Write the message down 'key' columns and read the columns off in order."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Rebuild the column grid of the ciphertext and read it back row by row."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 276
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 28
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 338
| 0
|
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the Lempel-Ziv scheme and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that the compressed file carries and return the remaining bits."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result into the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
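# Usage sketch (illustrative, hypothetical file names; the input is expected to
# come from the matching Lempel-Ziv compressor in this collection):
#   python lempel_ziv_decompress.py compressed.lz restored.bin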
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 254
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
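

def _demo_swin_config() -> None:
    # Illustrative sketch, not part of the original module: with the default
    # 4-stage depths [2, 2, 6, 2] the channel dimension doubles at each stage
    # transition, so hidden_size ends up at embed_dim * 2 ** 3 == 768.
    config = SwinConfig()
    assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)
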
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,  # the expected encoding defined above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
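

def _demo_separate_vocabs() -> None:
    # Hedged sketch, not part of the original test suite: Marian keeps separate
    # source and target SentencePiece vocabularies, so target-side text must be
    # encoded via ``text_target=`` (as the last test above does). Requires
    # network access to the hf-internal-testing checkpoint.
    tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
    source_ids = tokenizer("Tämä on testi").input_ids  # source vocab
    target_ids = tokenizer(text_target="This is a test").input_ids  # target vocab
    print(source_ids, target_ids)
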
"""
Preprocessing script before distillation.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Token ids fit in 16 bits when the vocabulary is small enough, which halves
    # the size of the pickled dataset.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
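

def _demo_dtype_choice() -> None:
    # Illustrative sketch, not part of the original script: BERT's ~30k vocab
    # fits in 16 bits, so its token ids are stored as uint16 rather than int32.
    vocab_size = 30522
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    assert dtype is np.uint16
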
if __name__ == "__main__":
main()
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
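

def _demo_minimal_callback() -> TrainerCallback:
    # Hedged sketch, not part of the original tests: a custom callback is just a
    # TrainerCallback subclass; passing an instance via Trainer(..., callbacks=[...])
    # is enough for on_step_end to fire after every optimizer step.
    class PrintStepCallback(TrainerCallback):
        def on_step_end(self, args, state, control, **kwargs):
            print(f"finished step {state.global_step}")

    return PrintStepCallback()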
"""Convert slow tokenizer checkpoints to the fast (``tokenizers`` serialization) format."""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"

    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]

    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
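

def _demo_nargs_parsing() -> None:
    # Hedged sketch, not part of the original tests: _convert_nargs_to_dict infers
    # Python types from the raw strings, which is exactly what the assertions
    # above check ("3" -> int, "5e-5" -> float, "False" -> bool).
    converted = _convert_nargs_to_dict(["--epochs", "3", "--learning_rate", "5e-5", "--do_train", "False"])
    print(converted)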
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
import os
def solution() -> int:
    """Return the total of all name scores in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # "A" is 65, so A -> 1, B -> 2, ...

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
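

def _demo_name_score() -> None:
    # Worked example from the problem statement: COLIN scores
    # 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the sorted list it
    # contributes 938 * 53 = 49714 to the total.
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53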
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the layer index is read off the file name and
    # shifted by the 3 non-block layers that precede the blocks in the checkpoint.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
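

def _demo_dtype_size() -> None:
    # Illustrative sketch, not part of the original script: the byte size is read
    # off the dtype's name, so torch.float16 -> 2 bytes, while torch.bool packs
    # to an eighth of a byte.
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.bool) == 1 / 8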
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        # The state_dict is fully loaded when no missing keys remain
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = RetriBertTokenizer
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
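# Illustrative usage (checkpoint name taken from the map above; shown as a sketch,
# not run here):
#   tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   tok.build_inputs_with_special_tokens([5, 6], [7])  # -> [cls_id, 5, 6, sep_id, 7, sep_id]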
| 78
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
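# Example: 49/98 is digit-cancelling, since striking the shared 9 gives 4/8 == 49/98.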
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(max_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
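# The classic two-digit case (Project Euler 33) has exactly four non-trivial such
# fractions: 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() == 100.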
if __name__ == "__main__":
print(solution())
| 177
| 0
|
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
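# Spot check (edge = 1): surface area 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457 and
# volume (15 + 7 * sqrt(5)) / 4 ≈ 7.6631, matching the closed forms above.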
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 253
| 0
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
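# Sanity check (illustrative): for y' = y, y(0) = 1 on [0, 1] with step 0.01,
# explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1] equals (1.01)**100 ≈ 2.7048,
# an underestimate of e ≈ 2.7183, as expected for forward Euler.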
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
| 16
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
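# Sketch of how this ONNX config is typically consumed (illustrative; export helpers
# are an assumption, only the properties above are defined here):
#   config = TableTransformerConfig()
#   onnx_config = TableTransformerOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values", "pixel_mask"], with dynamic batch/size axes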
| 354
|
def get_set_bits_count(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so the loop runs once
        # per set bit instead of once per bit position
        number &= number - 1
        count += 1
    return count
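# Example: get_set_bits_count(25) == 3 since 25 == 0b11001; this is Brian Kernighan's
# trick, so the loop body runs once per set bit rather than once per 32/64 bits.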
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 288
| 0
|
from math import log2


def get_index_of_lowest_set_bit(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    return 0 if number == 0 else int(log2(number & -number))
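# Example: for number = 36 (0b100100), number & -number isolates the lowest set bit
# (4 == 0b100), and int(log2(4)) == 2 is its zero-based index.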
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138
|
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
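# Note: torch.utils.cpp_extension.load JIT-compiles and caches the extension on first
# call, so importing MSDA this way requires a CUDA toolkit compatible with the
# installed PyTorch build; there is no precompiled fallback in this helper.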
| 138
| 1
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(self, text, return_tensors=False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
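# encode_fast/decode_fast bypass the PreTrainedTokenizer machinery and call
# SentencePiece directly. Illustrative round trip (checkpoint name from the map
# above; shown as a sketch, not run here):
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   tok.decode_fast(tok.encode_fast("Träd är fina"))  # -> "Träd är fina"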
| 81
|
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """simple docstring"""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
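# Usage sketch: get_activation("gelu") returns an nn.GELU() module; unknown names
# raise ValueError, so typos fail fast instead of silently falling back.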
| 147
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
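# 0-1 BFS: relaxing a 0-weight edge pushes to the front of the deque and a 1-weight
# edge to the back, so vertices are popped in nondecreasing distance order and the
# search runs in O(V + E) without a priority queue.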
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
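# Example: prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]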
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 284
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """simple docstring"""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        """simple docstring"""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
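# Illustrative alignment (hypothetical column names):
#   template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")
#   template.column_mapping  # -> {"audio": "audio", "text": "transcription"}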
| 51
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 128
| 0
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def _a ( self ) -> Dict:
# fmt: off
__UpperCamelCase ={'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 117
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """simple docstring"""

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """simple docstring"""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs
def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 117
| 1
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
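
# Illustrative sketch (the helper and TinyModel below are hypothetical, not part
# of the original script): set_recursively walks a dotted attribute path on the
# HF model and copies a tensor into the matching parameter in place.
def _demo_set_recursively():
    import torch
    from torch import nn

    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = nn.Linear(4, 4)

    model = TinyModel()
    set_recursively(model, "encoder", torch.ones(4, 4), "toy.encoder.weight", "weight")
    return model.encoder.weight  # now all ones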
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
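
# Sketch (not part of the original test suite): Dataset.from_list infers the
# schema from the first record, so a record missing a column comes back as None
# instead of growing a new column.
def _demo_from_list_schema():
    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    return dset.column_names, dset[1]  # (['col_1'], {'col_1': None})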
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
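
# Usage sketch (the checkpoint id "harmonai/maestro-150k" is one published
# unconditional audio model, used here purely as an illustration):
def _demo_dance_diffusion():
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
    return output.audios[0]  # numpy array of shape (channels, num_samples)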
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Processor that wraps a Bark tokenizer and optional speaker embeddings (voice presets)."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,"
                    " no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json"
                    " dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does"
                    f" not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the"
                    f" {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
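
# Usage sketch (checkpoint and voice preset names are illustrative; "suno/bark-small"
# and "v2/en_speaker_6" are published examples):
def _demo_bark_processor():
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    # returns a BatchFeature with input_ids, attention_mask and history_prompt
    return processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")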
"""simple docstring"""
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
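
# Sketch (not in the original): with an explicit seed the stream is fully
# reproducible, the main practical property of an LCG; parameters are the
# classic Numerical Recipes constants already used above.
def _demo_lcg_reproducible():
    a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
    return [a.next_number() for _ in range(3)] == [b.next_number() for _ in range(3)]  # True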
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the vector with the smaller squared distance wins
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Pull the winning vector j a fraction alpha of the way toward the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
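
# Sketch (illustrative, not in the original): a single competitive-learning step
# moves only the winning weight vector, by a fraction alpha of its distance to
# the sample.
def _demo_som_update():
    som = SelfOrganizingMap()
    weights = [[0.0, 0.0], [1.0, 1.0]]
    winner = som.get_winner(weights, [1, 0])
    return som.update(weights, [1, 0], winner, 0.5)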
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
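
# Sketch (not part of the original script): rename_key moves a tensor to a new
# key in place, which is how the mnli classification-head weights are remapped
# above.
def _demo_rename_key():
    d = {"old": 1}
    rename_key(d, "old", "new")
    return d  # {'new': 1}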
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Merge all the files with optimum (minimum total) cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
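
# Worked example (illustrative): merging [2, 3, 4] first combines 2+3=5 (cost 5),
# then 5+4=9 (cost 9), for a total optimal merge cost of 14.
def _demo_optimal_merge():
    return optimal_merge_pattern([2, 3, 4])  # 14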
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
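
# Usage sketch ("corpus.txt" is a hypothetical path): reads newline-delimited
# text into a map-style Dataset with a single "text" column.
def _demo_text_reader():
    return TextDatasetReader("corpus.txt", split=NamedSplit("train")).read()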
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
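
# Sketch (illustrative, requires flax installed): the auto classes resolve a
# checkpoint's config type to the matching Flax architecture through the lazy
# mappings defined above.
def _demo_flax_auto_model():
    return FlaxAutoModel.from_pretrained("bert-base-cased")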
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
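
# Sketch (illustrative): attribute_map aliases the generic config names onto
# Bloom's n_layer/n_head fields.
def _demo_bloom_config():
    config = BloomConfig(n_layer=4, n_head=8)
    return config.num_hidden_layers, config.num_attention_heads  # (4, 8)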
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """
    Return True if all elements of the collection are distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """
    Return the sum of all natural numbers below `limit` that are multiples of 3 or 5.

    >>> solution(10)
    23
    """
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
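
# Sketch (illustrative): the default configuration yields (224 / 16) ** 2 = 196
# patch tokens per image.
def _demo_vit_num_patches():
    config = ViTConfig()
    return (config.image_size // config.patch_size) ** 2  # 196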
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
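
    # A small worked example (illustrative inputs): this 2x2 system is strictly
    # diagonally dominant, so the iteration converges toward the exact solution.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, 0.5], iterations=3))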
| 9
|
"""Tests for the DisjunctiveConstraint used during constrained generation."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
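
# Hedged usage note (not part of the original test suite): generation code drives the
# constraint exactly as above -- call `dc.update(token_id)` once per generated token,
# check `dc.completed` to know when one of the disjunctive branches is fulfilled, and
# call `dc.reset()` when the beam diverges from every branch.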
| 9
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
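
# With this lazy structure, `import transformers.models.roberta` stays cheap: the
# torch/TF/Flax modules listed above are only imported when one of their names is
# first accessed, e.g. (sketch, assumes torch is installed):
#
#   from transformers.models.roberta import RobertaModel  # first access triggers the torch import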
| 226
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : int = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Union[str, Any] = """transfo-xl"""
__lowerCAmelCase : Optional[Any] = ["""mems"""]
__lowerCAmelCase : List[str] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , _lowerCamelCase : List[Any]=26_77_35 , _lowerCamelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _lowerCamelCase : str=10_24 , _lowerCamelCase : Union[str, Any]=10_24 , _lowerCamelCase : Union[str, Any]=16 , _lowerCamelCase : int=64 , _lowerCamelCase : Optional[int]=40_96 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : str=False , _lowerCamelCase : Union[str, Any]=18 , _lowerCamelCase : Optional[Any]=16_00 , _lowerCamelCase : Optional[int]=10_00 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple=0 , _lowerCamelCase : List[Any]=-1 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]="normal" , _lowerCamelCase : int=0.01 , _lowerCamelCase : List[str]=0.01 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=1E-5 , _lowerCamelCase : int=0 , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
A_ : Optional[Any] = vocab_size
A_ : str = []
self.cutoffs.extend(_lowerCamelCase )
if proj_share_all_but_first:
A_ : str = [False] + [True] * len(self.cutoffs )
else:
A_ : str = [False] + [False] * len(self.cutoffs )
A_ : Optional[Any] = d_model
A_ : Dict = d_embed
A_ : List[str] = d_head
A_ : List[Any] = d_inner
A_ : Dict = div_val
A_ : int = pre_lnorm
A_ : Optional[Any] = n_layer
A_ : List[Any] = n_head
A_ : List[Any] = mem_len
A_ : Dict = same_length
A_ : Optional[Any] = attn_type
A_ : Any = clamp_len
A_ : Dict = sample_softmax
A_ : List[Any] = adaptive
A_ : Union[str, Any] = dropout
A_ : List[Any] = dropatt
A_ : Any = untie_r
A_ : Optional[int] = init
A_ : int = init_range
A_ : List[Any] = proj_init_std
A_ : Union[str, Any] = init_std
A_ : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase )
@property
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def a_ ( self : Any , _lowerCamelCase : int ):
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 167
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` only if none of its already-colored neighbours has it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try every color for vertex `index`, then recurse."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
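

if __name__ == "__main__":
    # A small worked example (illustrative input): a 4-cycle graph needs only 2 colors.
    graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(graph, 2))  # prints a valid coloring such as [0, 1, 0, 1]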
| 357
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
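
# Hedged usage note: `calculate_rouge` above is the local seq2seq helper from
# `utils.py`, not the `rouge_score` package itself. A direct call looks like:
#
#   calculate_rouge(["the cat sat"], ["the cat sat down"], rouge_keys=["rouge1"])
#
# and returns bootstrap-aggregated scores unless bootstrap_aggregation=False.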
| 78
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n        >>> pipe_prior.to("cuda")\n        >>> prompt = "red cat, 4k photo"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> zero_image_emb = out.negative_image_embeds\n        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n        >>> pipe.to("cuda")\n        >>> image = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=50,\n        ... ).images\n        >>> image[0].save("cat.png")\n        ```\n'


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 253
|
def upper(word: str) -> str:
    """
    >>> upper("wow")
    'WOW'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 253
| 1
|
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 258
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
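
# Example invocation (the script name and paths here are illustrative):
#
#   python convert_bert_checkpoint.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert-tf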
| 258
| 1
|
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
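
# A hedged usage sketch: this builder is normally reached through the public API, e.g.
# (assumes an active SparkSession and an existing pyspark DataFrame `df`):
#
#   from datasets import Dataset
#   ds = Dataset.from_spark(df)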
| 84
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE__ = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<pad>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<mask_2>" , lowercase="<mask_1>" , lowercase=None , lowercase=103 , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'additional_special_tokens should be of type {type(lowercase )}, but is'
f' {type(lowercase )}' )
lowerCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowerCAmelCase = additional_special_tokens_extended
else:
lowerCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , pad_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = mask_token_sent
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# add special tokens to encoder dict
lowerCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
@property
def _snake_case ( self ) -> int:
return len(self.sp_model ) + self.offset
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , lowercase ) -> List[Any]:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase = self.sp_model.piece_to_id(lowercase )
return sp_id + self.offset
def _snake_case ( self , lowercase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def _snake_case ( self , lowercase ) -> Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
lowerCAmelCase = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def _snake_case ( self , lowercase=False ) -> Tuple:
return 1
def _snake_case ( self , lowercase ) -> Tuple:
lowerCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self , lowercase , lowercase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
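
# A minimal usage sketch (downloads the pretrained SentencePiece model mapped above):
#
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("The quick brown fox.")["input_ids"]
#   print(tokenizer.decode(ids))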
| 46
| 0
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar coordinates into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of point forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase__ = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
UpperCAmelCase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 363
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
from binascii import hexlify
from hashlib import sha256
from os import urandom


class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
def lowercase_ ( self :Dict ) -> Tuple:
'''simple docstring'''
__A = tempfile.mkdtemp()
__A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__A = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase_ ( self :List[Any] , **_A :Dict ) -> Any:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase_ ( self :Union[str, Any] , **_A :str ) -> Optional[Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase_ ( self :str , **_A :List[Any] ) -> Dict:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self :List[Any] ) -> Any:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
__A = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
__A = self.get_tokenizer()
__A = self.get_rust_tokenizer()
__A = self.get_image_processor()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
__A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
__A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def lowercase_ ( self :int ) -> str:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__A = self.prepare_image_inputs()
__A = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='np' )
__A = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__A = 'lower newer'
__A = processor(text=_SCREAMING_SNAKE_CASE )
__A = tokenizer(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__A = 'lower newer'
__A = self.prepare_image_inputs()
__A = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def lowercase_ ( self :Any ) -> Optional[Any]:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(_SCREAMING_SNAKE_CASE )
__A = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__A = 'lower newer'
__A = self.prepare_image_inputs()
__A = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content by removing duplicate entries and sorting titles alphabetically,
    with the "Overview" page pinned first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
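# Illustrative behavior of clean_doc_toc (an assumption, shown as a comment so
# the script's CLI behavior is unchanged): duplicate "local" entries with the
# same title collapse to one, and "Overview" is pinned first.
#
#   clean_doc_toc([
#       {"local": "overview", "title": "Overview"},
#       {"local": "ddim", "title": "DDIM"},
#       {"local": "ddim", "title": "DDIM"},
#   ])
#   # -> [{"local": "overview", "title": "Overview"}, {"local": "ddim", "title": "DDIM"}]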
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
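# Usage sketch (illustrative, kept as a comment so nothing runs at import):
# ContextManagers enters a dynamically built list of context managers as if
# they were nested `with` statements.
#
#   with ContextManagers([tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()]):
#       ...  # both directories exist here and are cleaned up on exit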
def can_return_loss(model_class):
    """Check whether a given model class can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
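# Quick illustration of flatten_dict (comment-only example):
#
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}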
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose`."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
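# Example of the framework-agnostic helpers above; only the NumPy path is
# exercised, so it would run without torch/tf/jax installed (illustrative):
#
#   x = np.zeros((2, 3, 4))
#   transpose(x).shape               # (4, 3, 2)
#   reshape(x, (6, 4)).shape         # (6, 4)
#   squeeze(np.zeros((1, 5))).shape  # (5,)
#   expand_dims(x, 0).shape          # (1, 2, 3, 4)
#   tensor_size(x)                   # 24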
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class without relying on `isinstance`."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
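# Shape summary (inferred from the code above, for orientation): for a protein
# with L residues, make_atom14_masks adds
#   residx_atom14_to_atom37: (L, 14) long  -- atom37 index of each atom14 slot
#   residx_atom37_to_atom14: (L, 37) long  -- gather indices for mapping back
#   atom14_atom_exists:      (L, 14) float -- mask of real atom14 slots
#   atom37_atom_exists:      (L, 37) float -- mask of real atom37 slots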
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
__lowerCAmelCase: Optional[Any] = hf_hub_url(repo_id=__SCREAMING_SNAKE_CASE , path=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__SCREAMING_SNAKE_CASE )}"
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/speecht5_asr": 1024,
"microsoft/speecht5_tts": 1024,
"microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,A__ ,A__="<s>" ,A__="</s>" ,A__="<unk>" ,A__="<pad>" ,A__ = None ,**A__ ,):
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ ,eos_token=A__ ,unk_token=A__ ,pad_token=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(A__)
@property
def A__ ( self):
return self.sp_model.get_piece_size()
def A__ ( self):
lowercase = {self.convert_ids_to_tokens(A__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self ,A__):
lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A__ ( self ,A__):
return self.sp_model.encode(A__ ,out_type=A__)
def A__ ( self ,A__):
return self.sp_model.piece_to_id(A__)
def A__ ( self ,A__):
lowercase = self.sp_model.IdToPiece(A__)
return token
def A__ ( self ,A__):
lowercase = []
lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__) + token
lowercase = []
else:
current_sub_tokens.append(A__)
out_string += self.sp_model.decode(A__)
return out_string.strip()
def A__ ( self ,A__ ,A__=None):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self ,A__ ,A__ = None ,A__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ ,token_ids_a=A__ ,already_has_special_tokens=A__)
lowercase = [1]
if token_ids_a is None:
return ([0] * len(A__)) + suffix_ones
return ([0] * len(A__)) + ([0] * len(A__)) + suffix_ones
def A__ ( self ,A__ ,A__ = None):
if not os.path.isdir(A__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(A__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,A__)
elif not os.path.isfile(self.vocab_file):
with open(A__ ,'''wb''') as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(A__)
return (out_vocab_file,)
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution induced by logits `x`."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
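# Sanity check (illustrative): uniform logits give the maximum entropy
# log(num_classes), e.g. entropy(torch.zeros(1, 4)) == tensor([1.3863]) == log(4).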
class DeeBertEncoder(nn.Module):
def __init__( self ,A__):
super().__init__()
lowercase = config.output_attentions
lowercase = config.output_hidden_states
lowercase = nn.ModuleList([BertLayer(A__) for _ in range(config.num_hidden_layers)])
lowercase = nn.ModuleList([BertHighway(A__) for _ in range(config.num_hidden_layers)])
lowercase = [-1 for _ in range(config.num_hidden_layers)]
def A__ ( self ,A__):
if (type(A__) is float) or (type(A__) is int):
for i in range(len(self.early_exit_entropy)):
lowercase = x
else:
lowercase = x
def A__ ( self ,A__):
lowercase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def A__ ( self ,A__ ,A__=None ,A__=None ,A__=None ,A__=None ,):
lowercase = ()
lowercase = ()
lowercase = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = layer_module(
A__ ,A__ ,head_mask[i] ,A__ ,A__)
lowercase = layer_outputs[0]
if self.output_attentions:
lowercase = all_attentions + (layer_outputs[1],)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = current_outputs + (all_attentions,)
lowercase = self.highway[i](A__)
# logits, pooled_output
if not self.training:
lowercase = highway_exit[0]
lowercase = entropy(A__)
lowercase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowercase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowercase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(A__ ,i + 1)
else:
lowercase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = outputs + (all_attentions,)
lowercase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class DeeBertModel(BertPreTrainedModel):
def __init__( self ,A__):
super().__init__(A__)
lowercase = config
lowercase = BertEmbeddings(A__)
lowercase = DeeBertEncoder(A__)
lowercase = BertPooler(A__)
self.init_weights()
def A__ ( self):
self.encoder.init_highway_pooler(self.pooler)
def A__ ( self):
return self.embeddings.word_embeddings
def A__ ( self ,A__):
lowercase = value
def A__ ( self ,A__):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(A__)
@add_start_docstrings_to_model_forward(A__)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
lowercase = input_ids.size()
elif inputs_embeds is not None:
lowercase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase = torch.ones(A__ ,device=A__)
if encoder_attention_mask is None:
lowercase = torch.ones(A__ ,device=A__)
if token_type_ids is None:
lowercase = torch.zeros(A__ ,dtype=torch.long ,device=A__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase = self.get_extended_attention_mask(A__ ,A__ ,A__)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowercase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowercase = encoder_attention_mask[:, None, None, :]
lowercase = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
lowercase = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase = self.get_head_mask(A__ ,self.config.num_hidden_layers)
lowercase = self.embeddings(
input_ids=A__ ,position_ids=A__ ,token_type_ids=A__ ,inputs_embeds=A__)
lowercase = self.encoder(
A__ ,attention_mask=A__ ,head_mask=A__ ,encoder_hidden_states=A__ ,encoder_attention_mask=A__ ,)
lowercase = encoder_outputs[0]
lowercase = self.pooler(A__)
lowercase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__( self ,A__ ,A__):
lowercase = message
lowercase = exit_layer # start from 1!
class BertHighway(nn.Module):
def __init__( self ,A__):
super().__init__()
lowercase = BertPooler(A__)
lowercase = nn.Dropout(config.hidden_dropout_prob)
lowercase = nn.Linear(config.hidden_size ,config.num_labels)
def A__ ( self ,A__):
# Pooler
lowercase = encoder_outputs[0]
lowercase = self.pooler(A__)
# "return" pooler_output
# BertModel
lowercase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowercase = bmodel_output[1]
lowercase = self.dropout(A__)
lowercase = self.classifier(A__)
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self ,A__):
super().__init__(A__)
lowercase = config.num_labels
lowercase = config.num_hidden_layers
lowercase = DeeBertModel(A__)
lowercase = nn.Dropout(config.hidden_dropout_prob)
lowercase = nn.Linear(config.hidden_size ,self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(A__)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=-1 ,A__=False ,):
lowercase = self.num_layers
try:
lowercase = self.bert(
A__ ,attention_mask=A__ ,token_type_ids=A__ ,position_ids=A__ ,head_mask=A__ ,inputs_embeds=A__ ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowercase = outputs[1]
lowercase = self.dropout(A__)
lowercase = self.classifier(A__)
lowercase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase = e.message
lowercase = e.exit_layer
lowercase = outputs[0]
if not self.training:
lowercase = entropy(A__)
lowercase = []
lowercase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
# work with highway exits
lowercase = []
for highway_exit in outputs[-1]:
lowercase = highway_exit[0]
if not self.training:
highway_logits_all.append(A__)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(highway_logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(highway_logits.view(-1 ,self.num_labels) ,labels.view(-1))
highway_losses.append(A__)
if train_highway:
lowercase = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
lowercase = (loss,) + outputs
if not self.training:
lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : int , snake_case_ : Tuple , snake_case_ : Dict=13 , snake_case_ : Optional[Any]=7 , snake_case_ : List[Any]=True , snake_case_ : int=True , snake_case_ : Optional[Any]=True , snake_case_ : Optional[Any]=True , snake_case_ : Optional[Any]=99 , snake_case_ : Any=32 , snake_case_ : List[Any]=5 , snake_case_ : Union[str, Any]=4 , snake_case_ : Any=37 , snake_case_ : int="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : str=0.1 , snake_case_ : str=512 , snake_case_ : int=16 , snake_case_ : Dict=2 , snake_case_ : Dict=0.02 , snake_case_ : List[Any]=4 , ):
snake_case__ : List[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Tuple = seq_length
snake_case__ : Dict = is_training
snake_case__ : List[Any] = use_attention_mask
snake_case__ : Optional[int] = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : List[Any] = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : int = num_choices
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : int = None
if self.use_attention_mask:
snake_case__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : str = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase ( self : str ):
snake_case__ : Tuple = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = config_and_inputs
snake_case__ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase = True
lowercase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase ( self : int ):
for model_class_name in self.all_model_classes:
snake_case__ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=snake_case_ )
snake_case__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case__ : Dict = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case__ : List[str] = model(snake_case_ )[0]
snake_case__ : Union[str, Any] = 50_000
snake_case__ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , snake_case_ )
snake_case__ : Tuple = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] using the bisection (Bolzano) method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values have the same sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is smaller than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
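# Worked example (illustrative): f(x) = x^3 - 2x - 5 has one real root near
# x ≈ 2.0946, and bisection halves [1, 1000] until |start - mid| < 1e-7:
#
#   round(bisection(f, 1, 1000), 4)  # 2.0946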
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = parent
def UpperCAmelCase_ ( self : int ) -> Tuple:
return {}
def get_html_strings():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
__SCREAMING_SNAKE_CASE = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = MarkupLMFeatureExtractionTester(self )
@property
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
# Initialize feature_extractor
__SCREAMING_SNAKE_CASE = self.feature_extraction_class()
# Test not batched input
__SCREAMING_SNAKE_CASE = get_html_strings()[0]
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
__SCREAMING_SNAKE_CASE = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
# Test batched
__SCREAMING_SNAKE_CASE = get_html_strings()
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCAmelCase__ )
# fmt: off
__SCREAMING_SNAKE_CASE = expected_nodes + [["My First Heading", "My first paragraph."]]
__SCREAMING_SNAKE_CASE = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns the shortest paths from a vertex src to all other vertices."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
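# Small worked example (illustrative, assuming the edge-dict format above):
#
#   edges = [
#       {"src": 0, "dst": 1, "weight": 2},
#       {"src": 1, "dst": 2, "weight": 3},
#       {"src": 0, "dst": 2, "weight": 10},
#   ]
#   bellman_ford(edges, vertex_count=3, edge_count=3, src=0)  # [0.0, 2.0, 5.0]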
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Union[str, Any] = int(input('''Enter number of vertices: ''').strip())
a__ : Any = int(input('''Enter number of edges: ''').strip())
a__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
a__ , a__ , a__ : str = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
a__ : str = {'''src''': src, '''dst''': dest, '''weight''': weight}
a__ : str = int(input('''\nEnter shortest path source:''').strip())
a__ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
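# Design note (illustrative): at import time the module object is swapped for a
# _LazyModule, so e.g. `from transformers.models.longt5 import LongT5Model`
# only pulls in the torch-dependent code on first attribute access.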
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317
| 1
|
def join(separator: str, separated: list) -> str:
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('join() accepts only strings to be joined')
        joined += word_or_phrase + separator
    # strip() removes the trailing separator that the loop appends
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
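# For example (quick sanity checks of join above):
#     join('-', ['apple', 'banana'])         # 'apple-banana'
#     join(' ', ['You', 'are', 'amazing!'])  # 'You are amazing!'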
| 357
|
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
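# For example:
#     twos_complement(-5)   # '0b1011'   (4-bit two's complement of -5)
#     twos_complement(-17)  # '0b101111' (6-bit two's complement of -17)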
| 45
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/deit-base-distilled-patch16-224''': (
        '''https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
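# A minimal sketch of instantiating the config above (the defaults correspond to DeiT-base):
#     config = DeiTConfig()
#     config.hidden_size, config.num_hidden_layers, config.image_size  # (768, 12, 224)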
| 54
|
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
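# For example, catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].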
| 54
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """simple docstring"""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    """simple docstring"""
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """simple docstring"""
    upgrade = {}

    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F'''{group_key}.''', F'''{group_key}.group.''')

        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """simple docstring"""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
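# Example invocation (script name and paths are illustrative):
#     python convert_dalle_to_flava_codebook.py --checkpoint_path ./encoder.pkl \
#         --pytorch_dump_folder_path ./flava-codebook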
| 154
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
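# For example, converting 4 cubic meters to litres:
#     volume_conversion(4, "cubicmeter", "litre")  # 4000.0 (4 * 1 * 1000)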
| 154
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    '''simple docstring'''
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    '''simple docstring'''
    for i in range(1, 6):
        if f"""layer_{i}.""" in name:
            name = name.replace(f"""layer_{i}.""", f"""encoder.layer.{i - 1}.""")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f""".{i}.{j}.""" in name:
                name = name.replace(f""".{i}.{j}.""", f""".{i}.layer.{j}.""")

    for i in range(2, 6):
        for j in range(0, 4):
            if f""".{i}.{j}.""" in name:
                name = name.replace(f""".{i}.{j}.""", f""".{i}.""")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f""".global_rep.{i}.weight""" in name:
            name = name.replace(f""".global_rep.{i}.weight""", ".layernorm.weight")
        if f""".global_rep.{i}.bias""" in name:
            name = name.replace(f""".global_rep.{i}.bias""", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ])
        else:
            raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1E-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
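# Example invocation (script name and paths are illustrative):
#     python convert_mobilevit_original_to_pytorch.py --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small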
| 54
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F" but all the following weights are missing {missing}")

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292
| 0
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
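# A minimal sketch of the property above: with the default conv_stride (5, 2, 2, 2, 2, 2, 2),
# inputs_to_logits_ratio is 5 * 2**6 == 320, i.e. one logit frame per 320 input samples.
#     config = Wav2Vec2Config()
#     config.inputs_to_logits_ratio  # 320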
| 300
| 0
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    _optional_components = ["""vqvae"""]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='''uint8''').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)['''sample''']
            else:
                model_output = self.unet(images, t)['''sample''']

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )['''prev_sample''']
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )['''prev_sample''']

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['''sample''']

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('''uint8''')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='''RGB''').convert('''L''') for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        '''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
| 179
|
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float('''-inf''')
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float('''-inf''') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''') for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    '''simple docstring'''
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            F"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)


def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
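# For example, for a rod of length 4 with prices [1, 5, 8, 9], the optimum is two
# pieces of length 2 (5 + 5):
#     bottom_up_cut_rod(4, [1, 5, 8, 9])  # 10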
| 179
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("""model_type""")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
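# A minimal sketch of the config above: with no backbone_config supplied, a default
# ResNet backbone configuration is created.
#     config = UperNetConfig()
#     config.backbone_config.model_type  # 'resnet'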
| 87
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = """speech_to_text_2"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 87
| 1
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase = "__DUMMY_TRANSFORMERS_USER__"
lowercase = "Dummy User"
lowercase = "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"
lowercase = "https://hub-ci.huggingface.co"
lowercase = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
lowercase = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
lowercase = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE', CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr('datasets.config.HF_ENDPOINT', CI_HUB_ENDPOINT)
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL', CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token', CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope='session')
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope='session')
def hf_token(ci_hub_token_path):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type='dataset')

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


# NOTE: the *_file path fixtures below (text_file, zip_file, zip_image_file) are assumed
# to be provided elsewhere in the test suite; their exact names could not be recovered.
@pytest.fixture(scope='session')
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f'''repo_txt_data-{int(time.time() * 10E3)}'''
    repo_id = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo='data/text_data.txt', repo_id=repo_id, repo_type='dataset', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset')
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope='session')
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_file):
    repo_name = f'''repo_zipped_txt_data-{int(time.time() * 10E3)}'''
    repo_id = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_file), path_in_repo='data.zip', repo_id=repo_id, repo_type='dataset', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset')
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope='session')
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_file):
    repo_name = f'''repo_zipped_img_data-{int(time.time() * 10E3)}'''
    repo_id = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_file), path_in_repo='data.zip', repo_id=repo_id, repo_type='dataset', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset')
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 178
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    '''simple docstring'''
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )

    return encoder_config, decoder_config
def rename_key(name):
    '''simple docstring'''
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1E-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1E-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1E-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
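# Example invocation (script name and paths are illustrative):
#     python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#         --pytorch_dump_folder_path ./donut-base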
| 353
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        '''simple docstring'''
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, file_path: str):
        '''simple docstring'''
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 298
| 0
|