| code (string, 81 to 54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters (fill these in before running).
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Read YOLO-format label files from label_dir and pair each
    with its image path from img_dir.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """
    Flip each image and mirror its bounding-box centers accordingly.
    Returns the flipped images, updated annotations, and original paths.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip mirrors the x center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip mirrors the y center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """
    Generate a random string of lowercase letters and digits,
    e.g. '7b7ad245cdff75241935e4dd860f3bad'.
    """
    assert number_char > 1, "number_char must be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
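# A hedged usage sketch (exercising only the GradientAccumulator API shown in
# the tests above, with illustrative values): gradients passed in are summed
# across calls, the running totals are exposed via `.gradients`, and `reset()`
# clears them between optimizer steps.
if is_tf_available():

    def gradient_accumulation_sketch():
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 1.0])])
        accumulator([tf.constant([2.0, -1.0])])
        assert accumulator.step == 2
        total = accumulator.gradients[0].numpy().tolist()  # [3.0, 0.0]
        accumulator.reset()
        return total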
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """
    Check if a number is a perfect square.
    """
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """
    Add the three fractions x, y, z and reduce the result to lowest terms.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    Collect every reduced sum of three fractions (for n in {1, 2, -1, -2})
    whose parts are bounded by `order`, and return numerator + denominator
    of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n = 1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = 2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n = -1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = -2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
class Graph:
    """
    Data structure to store graphs (based on adjacency lists).
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """
        Adds a vertex to the graph.
        """
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """
        Adds an undirected, weighted edge to the graph.
        """
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """
        Boruvka's algorithm requires distinct edge weights;
        this rewrites the weights so they are strictly distinct.
        """
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """
        Returns a string representation of the graph.
        """
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """
        Returns all edges in the graph as (tail, head, weight) tuples.
        """
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """
        Returns all vertices in the graph.
        """
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """
        Builds a graph from the given set of vertices and edges.
        """
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """
        Disjoint-set (union-find) structure used by Boruvka's algorithm.
        """

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                # path compression
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """
        Boruvka's algorithm: repeatedly add the cheapest edge leaving each
        component until a single component (the MST) remains.
        """
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
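# A small usage sketch for the Boruvka implementation above (the sample edges
# are illustrative): `distinct_weight()` enforces the distinct-weight
# precondition before the MST is computed.
example_graph = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
example_graph.distinct_weight()
print(Graph.boruvka_mst(example_graph))  # two edges spanning all three vertices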
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the model's weights to our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
def solution() -> str:
    """
    Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
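# A sketch of the modular shortcut (not in the original): since only the last
# ten digits are needed, each self-power can be reduced with three-argument pow.
def solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)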
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the model's weights to our RoBERTa-PreLayerNorm structure.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
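# A short illustration (a behavioral note, not part of the original file): with
# _LazyModule installed, none of the submodules above are imported eagerly;
# the first attribute access, e.g.
#     from transformers.models.vit import ViTConfig
# resolves the name through _import_structure and imports only that submodule.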
def fibonacci(n: int) -> int:
    """
    Computes the Fibonacci number at index n (0, 1, 1, 2, 3, 5, ...).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """
    Returns the index of the first Fibonacci number with n digits.
    """
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
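# A quick worked check (not in the original): the first Fibonacci number with
# three digits is fibonacci(12) = 144, so fibonacci_digits_index(3) == 12 and
# solution(3) == 12.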
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"

_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"

_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)      -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints tokens to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
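# A hedged usage sketch (not part of the module above; "gpt2" is just an
# illustrative checkpoint): TextIteratorStreamer is consumed from the calling
# thread while `generate()` pushes decoded text from a background thread.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tok)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=10))
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")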
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
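# A brief usage sketch (instantiation pattern assumed from the dataclass fields
# above): the template maps whatever column holds raw text onto the canonical
# "text" column expected by language-modeling code.
template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}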
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig( BertConfig ):
    '''simple docstring'''
    model_type = '''new-model'''
if is_tf_available():
    class TFNewModel( TFBertModel ):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_model_from_pretrained( self ) -> None:
        """simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertModel )
    @slow
    def test_model_for_pretraining_from_pretrained( self ) -> None:
        """simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModelForPreTraining.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertForPreTraining )
    @slow
    def test_model_for_causal_lm( self ) -> None:
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
    @slow
    def test_lmhead_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_masked_lm( self ) -> None:
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_encoder_decoder_lm( self ) -> None:
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
    @slow
    def test_sequence_classification_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
    @slow
    def test_question_answering_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
    @slow
    @require_tensorflow_probability
    def test_table_question_answering( self ) -> None:
        """simple docstring"""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TapasConfig )
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name )
            model , loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTapasForQuestionAnswering )
    def test_from_pretrained_identifier( self ) -> None:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type( self ) -> None:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_pretrained_with_tuple_values( self ) -> None:
        """simple docstring"""
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(model , TFFunnelModel )
        config = copy.deepcopy(model.config )
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config )
        self.assertIsInstance(model , TFFunnelBaseModel )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
            self.assertIsInstance(model , TFFunnelBaseModel )
    def test_new_model_registration( self ) -> None:
        """simple docstring"""
        try:
            AutoConfig.register('new-model' , NewModelConfig )
            auto_classes = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                with self.assertRaises(ValueError ):
                    auto_class.register(BertConfig , TFNewModel )
                auto_class.register(NewModelConfig , TFNewModel )
                # Trying to register something existing in the Transformers library will raise an error
                with self.assertRaises(ValueError ):
                    auto_class.register(BertConfig , TFBertModel )
                # Now that the config is registered, it can be used as any other config with the auto-API
                tiny_config = BertModelTester(self ).get_config()
                config = NewModelConfig(**tiny_config.to_dict() )
                model = auto_class.from_config(config )
                self.assertIsInstance(model , TFNewModel )
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model.save_pretrained(tmp_dir )
                    new_model = auto_class.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_model , TFNewModel )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found( self ) -> None:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            model = TFAutoModel.from_pretrained('bert-base' )
    def test_revision_not_found( self ) -> None:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_model_file_not_found( self ) -> None:
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def test_model_from_pt_suggestion( self ) -> None:
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
    def test_cached_model_has_minimum_calls_to_head( self ) -> None:
        """simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 651
| 0
|
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded : list[int] ) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    '''simple docstring'''
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
if __name__ == "__main__":
main()
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 651
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
    '''simple docstring'''
    lowercase = re.sub('<n>' , '' , lowercase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowercase ) )
| 717
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( TestCasePlus ):
'''simple docstring'''
    def test_run_glue( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
    def test_run_clm( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['eval_perplexity'] , 100 )
@slow
    def test_run_summarization( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='test' )
            self.assertGreaterEqual(result['test_rouge1'] , 10 )
            self.assertGreaterEqual(result['test_rouge2'] , 2 )
            self.assertGreaterEqual(result['test_rougeL'] , 7 )
            self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
    def test_run_mlm( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['eval_perplexity'] , 42 )
@slow
    def test_run_t5_mlm( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
    def test_run_ner( self ) -> None:
        """simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
    def test_run_qa( self ) -> None:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_f1'] , 30 )
            self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 0
|
import sys
import turtle
def get_mid( pa : tuple[float, float] , pb : tuple[float, float] ) -> tuple[float, float]:
    '''simple docstring'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle( vertex_a : tuple[float, float] , vertex_b : tuple[float, float] , vertex_c : tuple[float, float] , depth : int , ) -> None:
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertex_a[0] , vertex_a[1] )
    my_pen.down()
    my_pen.goto(vertex_b[0] , vertex_b[1] )
    my_pen.goto(vertex_c[0] , vertex_c[1] )
    my_pen.goto(vertex_a[0] , vertex_a[1] )
    if depth == 0:
        return
    triangle(vertex_a , get_mid(vertex_a , vertex_b ) , get_mid(vertex_a , vertex_c ) , depth - 1 )
    triangle(vertex_b , get_mid(vertex_a , vertex_b ) , get_mid(vertex_b , vertex_c ) , depth - 1 )
    triangle(vertex_c , get_mid(vertex_c , vertex_b ) , get_mid(vertex_a , vertex_c ) , depth - 1 )
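# Hedged sanity check: get_mid returns the edge midpoint used to split each
# triangle, and recursion depth d ultimately outlines 3**d smallest triangles.
assert get_mid((0, 0) , (4, 2) ) == (2.0, 1.0)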
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 718
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins( root : TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes( node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins( node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )
    # Main calculation
    def get_distrib( node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
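# Hedged worked example: a root holding all 3 coins over two empty leaves
# needs one move per leaf, i.e. 2 moves in total.
_example_tree = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
assert distribute_coins(_example_tree ) == 2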
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
| 0
|
from collections.abc import Iterable
from typing import Any
class Node:
    '''simple docstring'''
    def __init__( self , value = None ) -> None:
        """simple docstring"""
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None
def __repr__( self : Any ) -> Any:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    '''simple docstring'''
    def __init__( self , root = None ) -> None:
        """simple docstring"""
        self.root = root
    def __str__( self ) -> str:
        """simple docstring"""
        return str(self.root )
    def __reassign_nodes( self , node , new_children ) -> None:
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ) -> bool:
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        """simple docstring"""
        return self.root is None
    def __insert( self , value ) -> None:
        """simple docstring"""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ) -> None:
        """simple docstring"""
        for value in values:
            self.__insert(value )
    def search( self , value ):
        """simple docstring"""
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ):
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ):
        """simple docstring"""
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ) -> None:
        """simple docstring"""
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ):
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function = None ):
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr : list , node ) -> None:
        """simple docstring"""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k : int , node ) -> int:
        """simple docstring"""
        arr : list = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
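# Hedged usage sketch for the tree above: after inserting 8, 3 and 10, an
# inorder walk yields [3, 8, 10], so the 2nd-smallest value is 8.
_t = BinarySearchTree()
_t.insert(8 , 3 , 10 )
assert _t.find_kth_smallest(2 , _t.root ) == 8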
def postorder( curr_node ):
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    '''simple docstring'''
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )
    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )
    if not t.empty():
        print('Max Value: ' , t.get_max().value )  # type: ignore
        print('Min Value: ' , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 719
|
from manim import *
class A( Scene ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 0
|
def add( first : int , second : int ) -> int:
    '''simple docstring'''
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
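# Hedged worked trace: add(5, 9)
#   carry = 5 & 9 = 1,  first = 5 ^ 9 = 12, second = 1 << 1 = 2
#   carry = 12 & 2 = 0, first = 12 ^ 2 = 14, second = 0  -> returns 14
assert add(5 , 9 ) == 14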
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[Any] = int(input("Enter the first number: ").strip())
lowerCamelCase : List[str] = int(input("Enter the second number: ").strip())
print(F"""{add(first, second) = }""")
| 720
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    '''simple docstring'''
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ) -> dict:
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d : dict ) -> str:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651
| 0
|
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase : Optional[int] = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption( parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
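# Hedged note: pytest discovers these hooks strictly by name from a
# `conftest.py` at the test root, so e.g.
#   pytest --make-reports=tests tests/
# exercises the reporting branch in pytest_terminal_summary above.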
| 721
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len : int ) -> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( digit_len : int = 2 ) -> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
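# Hedged check: the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100.
assert solution() == 100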
if __name__ == "__main__":
print(solution())
| 651
| 0
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    '''simple docstring'''
    n = 10
    features = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
    dataset = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n ) ),
        } , features=features , )
return dataset
@pytest.fixture(scope='session' )
def arrow_file( tmp_path_factory , dataset ):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
lowerCamelCase : List[Any] = """\
Text data.
Second line of data."""
@pytest.fixture(scope='session' )
def text_file( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename , 'w' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='session' )
def bz2_file( tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def gz_file( tmp_path_factory ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    data = bytes(FILE_CONTENT , 'utf-8' )
    with gzip.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def lz4_file( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with lz4.frame.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def seven_zip_file( tmp_path_factory , text_file ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
    with py7zr.SevenZipFile(path , 'w' ) as archive:
        archive.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def tar_file( tmp_path_factory , text_file ):
    '''simple docstring'''
    import tarfile
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def xz_file( tmp_path_factory ):
    '''simple docstring'''
    import lzma
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with lzma.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def zip_file( tmp_path_factory , text_file ):
    '''simple docstring'''
    import zipfile
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def zstd_file( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def xml_file( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.xml'
    data = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
    with open(filename , 'w' ) as f:
        f.write(data )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = datasets.Dataset.from_dict(_lowerCamelCase )
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def sqlite_path( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
        con.commit()
    return path
@pytest.fixture(scope='session' )
def csv_path( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
return path
@pytest.fixture(scope='session' )
def csva_path( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
return path
@pytest.fixture(scope='session' )
def csv_bz2_path( csv_path , tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(csv_path , 'rb' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Tuple , lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Optional[int] , lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(_lowerCamelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : str , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def parquet_path( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        } )
    with open(path , 'wb' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase_ = {'data': DATA}
with open(_lowerCamelCase , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase_ = {'data': DATA_DICT_OF_LISTS}
with open(_lowerCamelCase , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_lowerCamelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_lowerCamelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
import gzip
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(_lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : List[Any] ):
'''simple docstring'''
import gzip
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(_lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.join('nested' , os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : int , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_lowerCamelCase , 'w' ) as f:
f.add(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
f.add(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_lowerCamelCase , 'w' ) as f:
f.add(_lowerCamelCase , arcname=os.path.join('nested' , os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = ['0', '1', '2', '3']
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = ['0', '1', '2', '3']
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = ['0', '1', '2', '3']
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict , lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(_lowerCamelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
lowerCamelCase_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase , arcname=os.path.basename(_lowerCamelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def data_dir( tmp_path_factory ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
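# Hedged usage sketch: a test requests the fixture above by its name, e.g.
#   def test_data_dir(data_dir):
#       assert (data_dir / "subdir" / "train.txt").exists()
# The hidden .test.txt file and .subdir exist to exercise glob filtering.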
| 700
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
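# A hedged usage sketch for the image processor above. In the un-obfuscated
# upstream class the entry point is `preprocess` (exposed via
# `BaseImageProcessor.__call__`); here the method names collapse to `a__`, so
# this assumes the original wiring.
import numpy as np

processor = A()  # defaults: shortest_edge-224 resize, 224x224 center crop
image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy HWC image
batch = processor(image, return_tensors='np')  # __call__ -> preprocess (assumed)
print(batch['pixel_values'].shape)  # expected: (1, 3, 224, 224)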
| 651
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Tuple=None , lowercase : Tuple=None ):
'''simple docstring'''
if "." in tensor_name:
lowerCamelCase_ = tensor_name.split('.' )
for split in splits[:-1]:
lowerCamelCase_ = getattr(lowercase , lowercase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowerCamelCase_ = new_module
lowerCamelCase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
lowerCamelCase_ = tensor_name in module._buffers
lowerCamelCase_ = getattr(lowercase , lowercase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
lowerCamelCase_ = False
lowerCamelCase_ = False
if is_buffer or not is_bitsandbytes_available():
lowerCamelCase_ = False
lowerCamelCase_ = False
else:
lowerCamelCase_ = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowerCamelCase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowerCamelCase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCamelCase_ = old_value.to(lowercase )
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = value.to('cpu' )
if value.dtype == torch.inta:
lowerCamelCase_ = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
lowerCamelCase_ = torch.tensor(lowercase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowercase ) and fpaa_statistics is None:
lowerCamelCase_ = new_value.T
lowerCamelCase_ = old_value.__dict__
if is_abit:
lowerCamelCase_ = bnb.nn.IntaParams(lowercase , requires_grad=lowercase , **lowercase ).to(lowercase )
elif is_abit:
lowerCamelCase_ = bnb.nn.Paramsabit(lowercase , requires_grad=lowercase , **lowercase ).to(lowercase )
lowerCamelCase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(lowercase ) )
else:
if value is None:
lowerCamelCase_ = old_value.to(lowercase )
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = value.to(lowercase )
else:
lowerCamelCase_ = torch.tensor(lowercase , device=lowercase )
if is_buffer:
lowerCamelCase_ = new_value
else:
lowerCamelCase_ = nn.Parameter(lowercase , requires_grad=old_value.requires_grad )
lowerCamelCase_ = new_value
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : str=None , lowercase : str=None , lowercase : int=None , lowercase : Optional[Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase_ = []
current_key_name.append(lowercase )
if (isinstance(lowercase , nn.Linear ) or isinstance(lowercase , lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowercase , lowercase ):
lowerCamelCase_ , lowerCamelCase_ = module.weight.shape
else:
lowerCamelCase_ = module.in_features
lowerCamelCase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowerCamelCase_ = bnb.nn.LinearabitLt(
lowercase , lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowerCamelCase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowerCamelCase_ = bnb.nn.Linearabit(
lowercase , lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowerCamelCase_ = True
# Store the module class in case we need to transpose the weight later
lowerCamelCase_ = type(lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowercase )
if len(list(module.children() ) ) > 0:
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowercase , lowercase , lowercase , lowercase , has_been_replaced=lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any=None , lowercase : Any=None ):
'''simple docstring'''
lowerCamelCase_ = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowercase , lowercase , lowercase , lowercase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _SCREAMING_SNAKE_CASE ( *lowercase : str , **lowercase : int ):
'''simple docstring'''
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , lowercase , )
return replace_with_bnb_linear(*lowercase , **lowercase )
def _SCREAMING_SNAKE_CASE ( *lowercase : int , **lowercase : Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowercase , )
return set_module_quantized_tensor_to_device(*lowercase , **lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = deepcopy(lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCamelCase_ = find_tied_parameters(lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase_ = sum(lowercase , [] )
lowerCamelCase_ = len(lowercase ) > 0
# Check if it is a base model
lowerCamelCase_ = not hasattr(lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase_ = list(model.named_children() )
lowerCamelCase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase_ = set(lowercase ) - set(lowercase )
lowerCamelCase_ = list(set(lowercase ) ) + list(lowercase )
# remove ".weight" from the keys
lowerCamelCase_ = ['.weight', '.bias']
lowerCamelCase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase_ = name.replace(lowercase , '' )
filtered_module_names.append(lowercase )
return filtered_module_names
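# A dependency-free sketch of the recursive replacement pattern used by
# `_replace_with_bnb_linear` above, with `nn.Identity` standing in for the
# bitsandbytes layers so the sketch runs without CUDA or bitsandbytes installed.
import torch.nn as nn

def replace_linears(model: nn.Module, skip=()) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            model._modules[name] = nn.Identity()  # stand-in for Linear8bitLt / Linear4bit
        else:
            replace_linears(child, skip)  # recurse into container modules
    return model

toy = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
print(replace_linears(toy))  # both Linear layers replaced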
| 701
|
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
lowerCamelCase_ = 0.04
lowerCamelCase_ = self.window_size // 2
for y in range(A_ , h - offset ):
for x in range(A_ , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
lowerCamelCase_ = det - k * (trace**2)
# Threshold on the corner response r; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 651
| 0
|
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
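# Truth-table sketch for the same tuple-count idiom used by the gate above.
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(f"{a} OR {b} = {int((a, b).count(1) != 0)}")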
| 702
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : A_[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
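# A small check sketch for the byte-level map the tokenizer relies on; the
# helper's original name `bytes_to_unicode` survives at its call site in
# `__init__` above.
btu = bytes_to_unicode()
assert len(btu) == 256               # every byte value gets a printable character
print(btu[ord('A')], btu[ord(' ')])  # 'A' maps to itself; space is remapped to 'Ġ'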
| 651
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : int = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
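# A minimal sketch of the same deferral idea via PEP 562 module-level
# __getattr__, as a simplified stand-in for `_LazyModule`.
import importlib

_LAZY = {'TableTransformerConfig': '.configuration_table_transformer'}  # name -> submodule

def __getattr__(name):
    if name in _LAZY:
        submodule = importlib.import_module(_LAZY[name], __name__)
        return getattr(submodule, name)  # import happens only on first attribute access
    raise AttributeError(name)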
| 703
|
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowercase , lowercase )
operand_stack.push(lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 651
| 0
|
from __future__ import annotations
from typing import Any
class A( __A ):
'''simple docstring'''
pass
class A:
'''simple docstring'''
def __init__( self : Dict , A_ : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = data
lowerCamelCase_ = None
def __iter__( self : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self
lowerCamelCase_ = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(A_ )
yield node.data
lowerCamelCase_ = node.next_node
@property
def a__ ( self : int ) -> Dict:
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowerCamelCase : Tuple = Node(1)
lowerCamelCase : Tuple = Node(2)
lowerCamelCase : Tuple = Node(3)
lowerCamelCase : Union[str, Any] = Node(4)
print(root_node.has_loop) # False
lowerCamelCase : Dict = root_node.next_node
print(root_node.has_loop) # True
lowerCamelCase : Union[str, Any] = Node(5)
lowerCamelCase : Tuple = Node(6)
lowerCamelCase : Union[str, Any] = Node(5)
lowerCamelCase : Union[str, Any] = Node(6)
print(root_node.has_loop) # False
lowerCamelCase : Optional[int] = Node(1)
print(root_node.has_loop) # False
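# An O(1)-space alternative sketch: Floyd's tortoise-and-hare, versus the O(n)
# visited list kept in `__iter__` above. Uses the same `next_node` attribute.
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node             # advance one step
        fast = fast.next_node.next_node   # advance two steps
        if slow is fast:                  # pointers meet only inside a cycle
            return True
    return False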
| 704
|
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
lowerCamelCase_ = len(lowercase )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase_ = 0
print(lowercase , end=',' )
# Consider rest of the activities
for j in range(lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase , end=',' )
lowerCamelCase_ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5]
lowerCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
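# A variant sketch that returns the selected indices instead of printing,
# assuming (as the greedy requires) activities are pre-sorted by finish time.
def select_activities(start: list, finish: list) -> list:
    chosen = [0]                            # the first activity is always selected
    for j in range(1, len(start)):
        if start[j] >= finish[chosen[-1]]:  # compatible with the last chosen one
            chosen.append(j)
    return chosen

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]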
| 651
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A( __lowercase ):
'''simple docstring'''
UpperCamelCase = ['''image_processor''']
UpperCamelCase = '''SamImageProcessor'''
def __init__( self : List[str] , A_ : int ) -> Any:
"""simple docstring"""
super().__init__(_A )
lowerCamelCase_ = self.image_processor
lowerCamelCase_ = -10
lowerCamelCase_ = self.image_processor.size['longest_edge']
def __call__( self : Optional[Any] , A_ : Tuple=None , A_ : List[Any]=None , A_ : Dict=None , A_ : Any=None , A_ : Optional[Union[str, TensorType]] = None , **A_ : List[str] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.image_processor(
_A , return_tensors=_A , **_A , )
# pop arguments that are not used in the forward pass but are used nevertheless
lowerCamelCase_ = encoding_image_processor['original_sizes']
if hasattr(_A , 'numpy' ): # Checks if Torch or TF tensor
lowerCamelCase_ = original_sizes.numpy()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._check_and_preprocess_points(
input_points=_A , input_labels=_A , input_boxes=_A , )
lowerCamelCase_ = self._normalize_and_convert(
_A , _A , input_points=_A , input_labels=_A , input_boxes=_A , return_tensors=_A , )
return encoding_image_processor
def a__ ( self : str , A_ : List[Any] , A_ : Optional[int] , A_ : Union[str, Any]=None , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Dict="pt" , ) -> Optional[Any]:
"""simple docstring"""
if input_points is not None:
if len(_A ) != len(_A ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] ) for point in input_points
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , _A )
for point, original_size in zip(_A , _A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCamelCase_ , lowerCamelCase_ = self._pad_points_and_labels(_A , _A )
lowerCamelCase_ = np.array(_A )
if input_labels is not None:
lowerCamelCase_ = np.array(_A )
if input_boxes is not None:
if len(_A ) != len(_A ):
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] , is_bounding_box=_A )
for box in input_boxes
]
else:
lowerCamelCase_ = [
self._normalize_coordinates(self.target_size , _A , _A , is_bounding_box=_A )
for box, original_size in zip(_A , _A )
]
lowerCamelCase_ = np.array(_A )
if input_boxes is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# boxes batch size of 1 by default
lowerCamelCase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# boxes batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# point batch size of 1 by default
lowerCamelCase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCamelCase_ = torch.from_numpy(_A )
# point batch size of 1 by default
lowerCamelCase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCamelCase_ = tf.convert_to_tensor(_A )
# point batch size of 1 by default
lowerCamelCase_ = tf.expand_dims(_A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def a__ ( self : Any , A_ : str , A_ : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = max([point.shape[0] for point in input_points] )
lowerCamelCase_ = []
for i, point in enumerate(_A ):
if point.shape[0] != expected_nb_points:
lowerCamelCase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowerCamelCase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_A )
lowerCamelCase_ = processed_input_points
return input_points, input_labels
def a__ ( self : List[Any] , A_ : int , A_ : np.ndarray , A_ : Any , A_ : Dict=False ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = original_size
lowerCamelCase_ , lowerCamelCase_ = self.image_processor._get_preprocess_shape(_A , longest_edge=_A )
lowerCamelCase_ = deepcopy(_A ).astype(_A )
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 2 , 2 )
lowerCamelCase_ = coords[..., 0] * (new_w / old_w)
lowerCamelCase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCamelCase_ = coords.reshape(-1 , 4 )
return coords
def a__ ( self : Any , A_ : Optional[int]=None , A_ : List[str]=None , A_ : Optional[int]=None , ) -> Union[str, Any]:
"""simple docstring"""
if input_points is not None:
if hasattr(_A , 'numpy' ): # Checks for TF or Torch tensor
lowerCamelCase_ = input_points.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_points[0] , _A ):
raise ValueError('Input points must be a list of lists of floating points.' )
lowerCamelCase_ = [np.array(_A ) for input_point in input_points]
else:
lowerCamelCase_ = None
if input_labels is not None:
if hasattr(_A , 'numpy' ):
lowerCamelCase_ = input_labels.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_labels[0] , _A ):
raise ValueError('Input labels must be a list of lists of integers.' )
lowerCamelCase_ = [np.array(_A ) for label in input_labels]
else:
lowerCamelCase_ = None
if input_boxes is not None:
if hasattr(_A , 'numpy' ):
lowerCamelCase_ = input_boxes.numpy().tolist()
if (
not isinstance(_A , _A )
or not isinstance(input_boxes[0] , _A )
or not isinstance(input_boxes[0][0] , _A )
):
raise ValueError('Input boxes must be a list of lists of lists of floating points.' )
lowerCamelCase_ = [np.array(_A ).astype(np.floataa ) for box in input_boxes]
else:
lowerCamelCase_ = None
return input_points, input_labels, input_boxes
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(_A ) )
def a__ ( self : List[Any] , *A_ : Union[str, Any] , **A_ : Optional[Any] ) -> int:
"""simple docstring"""
return self.image_processor.post_process_masks(*_A , **_A )
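# A standalone sketch of the coordinate rescaling done in `_normalize_coordinates`
# above: points are scaled from the original image size to the preprocessed size.
import numpy as np

def rescale_points(points, old_hw, new_hw):
    (old_h, old_w), (new_h, new_w) = old_hw, new_hw
    out = np.asarray(points, dtype=float).copy()
    out[..., 0] *= new_w / old_w  # x coordinates
    out[..., 1] *= new_h / old_h  # y coordinates
    return out

# e.g. a 2048x1536 (h, w) image whose longest edge is resized to 1024
print(rescale_points([[768.0, 1024.0]], (2048, 1536), (1024, 768)))  # [[384. 512.]]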
| 705
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = torch.load(lowerCAmelCase__ , map_location='cpu' )
lowerCamelCase_ = chkpt['model']
# We have the base model one level deeper than the original XLM repository
lowerCamelCase_ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCamelCase_ = v
else:
lowerCamelCase_ = v
lowerCamelCase_ = chkpt['params']
lowerCamelCase_ = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase__ , (torch.FloatTensor, numpy.ndarray) )}
lowerCamelCase_ = chkpt['dico_word2id']
lowerCamelCase_ = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
lowerCamelCase_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '\n' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '\n' )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
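# A focused sketch of the vocab rewrite performed above: BPE continuation pieces
# ('@@') are stripped, and ordinary word-final tokens beyond the first 14
# special ids get a '</w>' suffix.
vocab = {'<s>': 0, 'hel@@': 20, 'lo': 21}
converted = {
    s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i
    for s, i in vocab.items()
}
print(converted)  # {'<s>': 0, 'hel': 20, 'lo</w>': 21}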
| 706
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self ):
        """simple docstring"""
        prompt = 'Hello I believe in'
        text_generator = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
        output = text_generator(prompt , stop_sequence=' fe' )
        self.assertEqual(output , [{'generated_text': 'Hello I believe in fe'}] )
    def run_pipeline_test(self , text_generator , _ ):
        """simple docstring"""
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator('This is a test' )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
        outputs = text_generator('This is a test' , return_full_text=False )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
        text_generator = pipeline(task='text-generation' , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator('This is a test' )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
        outputs = text_generator('This is a test' , return_full_text=True )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
        outputs = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                    [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                ] , )
        with self.assertRaises(ValueError ):
            outputs = text_generator('test' , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('test' , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('test' , return_text=True , return_tensors=True )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
            outputs = text_generator('' )
            self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
            outputs = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(ValueError ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self ):
        """simple docstring"""
        import torch
        # Classic `model_kwargs`
        pipe = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe('This is a test' )
self.assertEqual(
            outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe('This is a test' )
self.assertEqual(
            outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        outputs = pipe('This is a test' )
self.assertEqual(
            outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self ):
        """simple docstring"""
        import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
        pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self ):
        """simple docstring"""
        import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
        pipe('This is a test' , do_sample=True , top_p=0.5 )
    def test_pipeline_length_setting_warning(self ):
        """simple docstring"""
        prompt = 'Hello world'
        text_generator = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('transformers.generation.tf_utils' )
        else:
            logger = logging.get_logger('transformers.generation.utils' )
        logger_msg = 'Both `max_new_tokens`'  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
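# Usage sketch (illustrative, not part of the original test file): the warning
# asserted above only fires when both length controls are supplied together.
#
#   from transformers import pipeline
#   generator = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
#   generator('Hello world' , max_new_tokens=1 )                  # no warning
#   generator('Hello world' , max_length=10 , max_new_tokens=1 )  # logs "Both `max_new_tokens` ..."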
| 651
| 0
|
def longest_distance(graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
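# Worked example (sketch): with `long_dist` initialised to 1 per vertex, the
# Kahn-style traversal above relaxes each edge once in topological order; one
# longest chain in this DAG is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5.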
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers(self ):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency(self ):
        """simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
# Copy consistency with rename
self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
# Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('DDPM' , long_class_name , REFERENCE_CODE ) , )
# Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
| 651
| 0
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
        job_title = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
        company_name = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 708
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual(self , list1 , list2 , tol ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_gradient_accumulator(self ):
        """simple docstring"""
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def test_gradient_accumulator_distribution(self ):
        """simple docstring"""
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
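# Conceptual sketch (not part of the original test file): gradient accumulation
# simply sums per-step gradients until an optimizer step is applied, which is
# what the first test checks: [1, 2] + [-2, 1] + [-1, 2] == [-2, 5].
#
#   acc = GradientAccumulator()
#   for g in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
#       acc([tf.constant(g)])
#   assert acc.gradients[0].numpy().tolist() == [-2.0, 5.0]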
| 651
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__(self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class Blip2QFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__(self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class Blip2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = Blip2VisionConfig(**vision_config )
        self.qformer_config = Blip2QFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls , vision_config , qformer_config , text_config , **kwargs , ):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict(self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output
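# Usage sketch (assumes the class names restored above match the installed
# `transformers` version): a composite config can be built from plain dicts,
# with each missing sub-config falling back to its defaults.
#
#   config = Blip2Config(vision_config={} , qformer_config={} , text_config={'model_type': 'opt'} )
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size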
| 709
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray_img = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    '''simple docstring'''
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    '''simple docstring'''
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def test_gen_gaussian_kernel():
    '''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    '''simple docstring'''
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    '''simple docstring'''
    assert gg.gaussian_filter(gray_img , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    '''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray_img , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    '''simple docstring'''
    assert med.median_filter(gray_img , 3 ).any()
def test_sobel_filter():
    '''simple docstring'''
    grad, theta = sob.sobel_filter(gray_img )
    assert grad.any() and theta.any()
def test_sepia():
    '''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''simple docstring'''
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    '''simple docstring'''
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
assert lbp_image.any()
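# Usage note (sketch): each test only asserts that the filter output is a
# non-trivial array; with pytest installed they can be collected directly,
# e.g. `python -m pytest -k test_local_binary_pattern` from the repository
# root (path layout assumed).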
| 651
| 0
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''masked_bert'''
    def __init__(self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
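# Usage sketch (illustrative): the pruning-specific fields ride along with the
# usual BERT hyper-parameters.
#
#   config = MaskedBertConfig(pruning_method='topK' , mask_init='constant' , mask_scale=0.0 )
#   assert config.pruning_method == 'topK'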
| 710
|
class Graph:
    '''simple docstring'''
    def __init__(self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e: e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self ):
        """simple docstring"""
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )
    def get_edges(self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices(self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind:
    '''simple docstring'''
    def __init__(self ):
        """simple docstring"""
        self.parent = {}
        self.rank = {}
    def __len__(self ):
        """simple docstring"""
        return len(self.parent )
    def make_set(self , item ):
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find(self , item ):
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union(self , item1 , item2 ):
        """simple docstring"""
        root1 = self.find(item1 )
        root2 = self.find(item2 )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
    @staticmethod
    def boruvka_mst(graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
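# Usage sketch (method and class names as restored above): Boruvka's algorithm
# repeatedly takes the cheapest edge leaving each component, so for a weighted
# triangle it keeps the two lightest edges and drops the heaviest.
#
#   g = Graph.build(vertices=[0, 1, 2] , edges=[[0, 1, 1], [1, 2, 2], [0, 2, 3]] )
#   mst = UnionFind.boruvka_mst(g )
#   print(mst )  # contains the weight-1 and weight-2 edges; [0, 2, 3] is dropped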
| 651
| 0
|
def fizz_buzz(number: int , iterations: int ) -> str:
    '''simple docstring'''
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
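# Example (sketch, doctest-style): each token is followed by a space, and
# multiples of both 3 and 5 combine into "FizzBuzz".
#
#   >>> fizz_buzz(1, 15)
#   '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '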
| 711
|
def solution():
    '''simple docstring'''
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
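# Equivalent sketch using modular exponentiation to keep intermediate numbers
# small; Project Euler 48 asks for exactly these last ten digits.
#
#   last_ten = sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
#   print(str(last_ten).zfill(10))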
| 651
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
    '''simple docstring'''
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , 'vision' )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        """simple docstring"""
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        """simple docstring"""
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
        return inputs
    def _forward(self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ):
        """simple docstring"""
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
        return annotation
    def _get_bounding_box(self , box ) -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
return bbox
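# Usage sketch (requires torch and a detection checkpoint; the model name is
# only an example, not mandated by the code above):
#
#   from transformers import pipeline
#   detector = pipeline('object-detection' , model='facebook/detr-resnet-50' )
#   detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.9 )
#   # -> [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]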
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
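# Import-time behaviour (sketch): thanks to `_LazyModule`, the framework
# modules registered above are only imported when one of their attributes is
# first accessed, e.g.
#
#   from transformers.models.vit import ViTConfig   # cheap, no torch import yet
#   from transformers.models.vit import ViTModel    # triggers the torch import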
| 651
| 0
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ):
    '''simple docstring'''
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
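# Reading the result (sketch): the two measured classical bits hold the sum
# and carry-out, so quantum_full_adder(1, 1, 1) computes 1 + 1 + 1 = 3 = 0b11
# and virtually all of the 1000 shots land on the state '11'.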
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    '''simple docstring'''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    '''simple docstring'''
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines ):
    '''simple docstring'''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute(self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        """simple docstring"""
        metrics = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 651
| 0
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        """simple docstring"""
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model(self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , A_ : Union[str, Any] , A_ : str , A_ : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = ViTHybridForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ViTHybridModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(UpperCamelCase_ )
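        # _config_zero_init is assumed to zero out the initializer ranges (including any
        # nested backbone config), so a surviving weight that is neither 0.0 nor 1.0 below
        # would point at a hard-coded initialisation rather than the configured one.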
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCamelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ViTHybridModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCamelCase_ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
lowerCamelCase_ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=UpperCamelCase_ , return_tensors='pt' )
lowerCamelCase_ = model(**UpperCamelCase_ )
lowerCamelCase_ = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCamelCase_ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 714
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
| 651
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
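# _import_structure maps each submodule to its public names; the _LazyModule constructed at
# the bottom of this file uses it so the heavy torch/TF modules are only imported on first
# attribute access.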
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
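            # The auto registries are process-global, so every mapping touched above must be
            # cleaned here or later tests in the same process would still see the fake model.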
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 651
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : int = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''bert'''
def __init__( self : Union[str, Any] , A_ : Tuple=30522 , A_ : Any=768 , A_ : Dict=12 , A_ : Optional[Any]=12 , A_ : Tuple=3072 , A_ : Union[str, Any]="gelu" , A_ : Union[str, Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Any=512 , A_ : Dict=2 , A_ : Optional[int]=0.02 , A_ : Tuple=1E-12 , A_ : Union[str, Any]=0 , A_ : str="absolute" , A_ : int=True , A_ : str=None , **A_ : Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = classifier_dropout
class A( UpperCamelCase ):
'''simple docstring'''
@property
def a__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase_ = {0: 'batch', 1: 'sequence'}
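        # Example (illustrative): with the default task this returns
        # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...]); the dynamic axes
        # mark the dimensions that are allowed to vary at ONNX export time.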
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox_japanese'''
def __init__( self : int , A_ : Dict=32000 , A_ : List[Any]=2560 , A_ : Dict=32 , A_ : Union[str, Any]=32 , A_ : List[Any]=4 , A_ : List[str]="gelu" , A_ : Dict=1.00 , A_ : int=10000 , A_ : Dict=2048 , A_ : Dict=0.02 , A_ : Any=1E-5 , A_ : Union[str, Any]=True , A_ : int=31996 , A_ : List[str]=31999 , A_ : List[Any]=0.1 , A_ : List[Any]=0.0 , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
| 651
| 0
|
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : list ):
'''simple docstring'''
_enforce_args(snake_case__ , snake_case__ )
if n == 0:
return 0
lowerCamelCase_ = float('-inf' )
for i in range(1 , n + 1 ):
lowerCamelCase_ = max(
snake_case__ , prices[i - 1] + naive_cut_rod_recursive(n - i , snake_case__ ) )
    return max_revenue
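# Illustrative check (prices assumed, not from this file): with prices = [1, 5, 8, 9],
# naive_cut_rod_recursive(4, prices) tries every first-cut length and recurses on the
# remainder, returning 10 (two pieces of length 2: 5 + 5). It explores all
# 2 ** (n - 1) = 8 cut patterns, so the naive version is exponential in n.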
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : list ):
'''simple docstring'''
_enforce_args(snake_case__ , snake_case__ )
lowerCamelCase_ = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : list , lowercase : list ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCamelCase_ = float('-inf' )
for i in range(1 , n + 1 ):
lowerCamelCase_ = max(
snake_case__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , snake_case__ , snake_case__ ) , )
lowerCamelCase_ = max_revenue
return max_rev[n]
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : list ):
'''simple docstring'''
_enforce_args(snake_case__ , snake_case__ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
lowerCamelCase_ = [float('-inf' ) for _ in range(n + 1 )]
lowerCamelCase_ = 0
for i in range(1 , n + 1 ):
lowerCamelCase_ = max_rev[i]
for j in range(1 , i + 1 ):
lowerCamelCase_ = max(snake_case__ , prices[j - 1] + max_rev[i - j] )
lowerCamelCase_ = max_revenue_i
return max_rev[n]
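# Illustrative trace (same assumed prices [1, 5, 8, 9]): the bottom-up table max_rev fills
# as [0, 1, 5, 8, 10], the best revenue for rod lengths 0..4, in O(n^2) time overall.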
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : list ):
'''simple docstring'''
if n < 0:
lowerCamelCase_ = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(snake_case__ )
if n > len(snake_case__ ):
lowerCamelCase_ = (
'Each integral piece of rod must have a corresponding price. '
f"""Got n = {n} but length of prices = {len(snake_case__ )}"""
)
raise ValueError(snake_case__ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = [6, 10, 12, 15, 20, 23]
lowerCamelCase_ = len(snake_case__ )
# the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
lowerCamelCase_ = 36
lowerCamelCase_ = top_down_cut_rod(snake_case__ , snake_case__ )
lowerCamelCase_ = bottom_up_cut_rod(snake_case__ , snake_case__ )
lowerCamelCase_ = naive_cut_rod_recursive(snake_case__ , snake_case__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 717
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : Tuple = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('-f' )
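    # The lone '-f' option absorbs the extra flag that pytest/Jupyter inject into sys.argv,
    # so parse_args() does not fail when this test module runs under those runners.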
lowerCamelCase_ = parser.parse_args()
return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(A_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_ner.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_qa.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['image_processor', 'tokenizer']
UpperCamelCase = 'CLIPImageProcessor'
UpperCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Union[str, Any] , A_ : str=None , A_ : Union[str, Any]=None , **A_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase__ , )
lowerCamelCase_ = kwargs.pop('feature_extractor' )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Any , A_ : Dict=None , A_ : Union[str, Any]=None , A_ : int=None , **A_ : Union[str, Any] ) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if images is not None:
lowerCamelCase_ = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ )
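        # In short: text-only input returns the tokenizer's encoding, image-only input wraps
        # the image-processor output in a BatchEncoding, and text plus images merges the
        # pixel values into the text encoding so a single dict can feed the model.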
def a__ ( self : List[str] , *A_ : Dict , **A_ : int ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def a__ ( self : Optional[int] , *A_ : Optional[Any] , **A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer.model_input_names
lowerCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase__ , )
return self.image_processor_class
@property
def a__ ( self : Any ) -> str:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase__ , )
return self.image_processor
| 718
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = None
lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase ) != count_coins(lowercase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowercase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.left )
lowerCamelCase_ , lowerCamelCase_ = get_distrib(node.right )
lowerCamelCase_ = 1 - left_distrib_excess
lowerCamelCase_ = 1 - right_distrib_excess
lowerCamelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowercase )
+ abs(lowercase )
)
lowerCamelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowercase , lowercase )
return get_distrib(lowercase )[0]
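# Illustrative example (tree assumed, not from this file): a root holding 3 coins with two
# empty leaf children needs 2 moves, one coin pushed down each edge; get_distrib reports each
# node's coin excess up to its parent and charges abs(excess) moves per edge.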
if __name__ == "__main__":
import doctest
doctest.testmod()
| 651
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = """T5Config"""
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Tuple , lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = jnp.zeros_like(lowercase )
lowerCamelCase_ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ = shifted_input_ids.at[:, 0].set(lowercase )
lowerCamelCase_ = jnp.where(shifted_input_ids == -1_00 , lowercase , lowercase )
return shifted_input_ids
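# Illustrative example (values assumed): input_ids [[5, -100, 7]] with pad_token_id=1 and
# decoder_start_token_id=0 becomes [[0, 5, 1]]: tokens shift right, the start token fills
# position 0, and any -100 label sentinel is replaced by the pad token.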
class A( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
class A( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
class A( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
| 719
|
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 0
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger("transformers.models.speecht5")
lowerCamelCase : List[str] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
lowerCamelCase : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
lowerCamelCase : Dict = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
lowerCamelCase : Union[str, Any] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
lowerCamelCase : Tuple = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
lowerCamelCase : Optional[Any] = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
lowerCamelCase : Union[str, Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
lowerCamelCase : Tuple = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
lowerCamelCase : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowerCamelCase : str = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCamelCase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCamelCase : str = []
lowerCamelCase : str = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
lowerCamelCase : Dict = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
lowerCamelCase : List[Any] = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
lowerCamelCase : Tuple = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[str] , lowercase : List[str] , lowercase : str ):
'''simple docstring'''
for attribute in key.split('.' ):
lowerCamelCase_ = getattr(__a , __a )
if weight_type is not None:
lowerCamelCase_ = getattr(__a , __a ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
elif weight_type == "running_mean":
lowerCamelCase_ = value
elif weight_type == "running_var":
lowerCamelCase_ = value
elif weight_type == "num_batches_tracked":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Any ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCamelCase_ , lowerCamelCase_ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
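# Illustrative examples (names assumed): with "encoder.proj" in ignore_keys the name
# "encoder.proj.weight" matches as a substring; "text_encoder_prenet.*" matches any name
# starting with "text_encoder_prenet."; a "layers.*.norm_k" style key needs both the prefix
# and the suffix to appear in the name.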
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Dict , lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = []
if task == "s2t":
lowerCamelCase_ = hf_model.speechta.encoder.prenet.feature_encoder
lowerCamelCase_ = MAPPING_S2T
lowerCamelCase_ = IGNORE_KEYS_S2T
elif task == "t2s":
lowerCamelCase_ = None
lowerCamelCase_ = MAPPING_T2S
lowerCamelCase_ = IGNORE_KEYS_T2S
elif task == "s2s":
lowerCamelCase_ = hf_model.speechta.encoder.prenet.feature_encoder
lowerCamelCase_ = MAPPING_S2S
lowerCamelCase_ = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(__a , __a ):
logger.info(f"""{name} was ignored""" )
continue
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowerCamelCase_ , lowerCamelCase_ = key.split('.*.' )
if prefix in name and suffix in name:
lowerCamelCase_ = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(__a )[0].split('.' )[-2]
lowerCamelCase_ = mapped_key.replace('*' , __a )
if "weight_g" in name:
lowerCamelCase_ = 'weight_g'
elif "weight_v" in name:
lowerCamelCase_ = 'weight_v'
elif "bias" in name:
lowerCamelCase_ = 'bias'
elif "weight" in name:
lowerCamelCase_ = 'weight'
elif "running_mean" in name:
lowerCamelCase_ = 'running_mean'
elif "running_var" in name:
lowerCamelCase_ = 'running_var'
elif "num_batches_tracked" in name:
lowerCamelCase_ = 'num_batches_tracked'
else:
lowerCamelCase_ = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Union[str, Any] , lowercase : int , lowercase : Tuple , lowercase : List[Any] ):
'''simple docstring'''
lowerCamelCase_ = full_name.split('conv_layers.' )[-1]
lowerCamelCase_ = name.split('.' )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[str] , lowercase : List[str] , lowercase : Any=None , lowercase : str=None , lowercase : Union[str, Any]=None , ):
'''simple docstring'''
if config_path is not None:
lowerCamelCase_ = SpeechTaConfig.from_pretrained(__a )
else:
lowerCamelCase_ = SpeechTaConfig()
if task == "s2t":
lowerCamelCase_ = config.max_text_positions
lowerCamelCase_ = SpeechTaForSpeechToText(__a )
elif task == "t2s":
lowerCamelCase_ = 18_76
lowerCamelCase_ = 6_00
lowerCamelCase_ = config.max_speech_positions
lowerCamelCase_ = SpeechTaForTextToSpeech(__a )
elif task == "s2s":
lowerCamelCase_ = 18_76
lowerCamelCase_ = config.max_speech_positions
lowerCamelCase_ = SpeechTaForSpeechToSpeech(__a )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
lowerCamelCase_ = SpeechTaTokenizer(__a , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken('<mask>' , lstrip=__a , rstrip=__a )
lowerCamelCase_ = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
lowerCamelCase_ = SpeechTaFeatureExtractor()
lowerCamelCase_ = SpeechTaProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(__a )
lowerCamelCase_ = torch.load(__a )
recursively_load_weights(fairseq_checkpoint['model'] , __a , __a )
model.save_pretrained(__a )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(__a )
model.push_to_hub(__a )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowerCamelCase : Dict = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
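# Illustrative invocation of the converter above (script name and all paths are
# placeholders, not real files):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf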
| 720
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
return EnvironmentCommand()
class A( UpperCamelCase ):
'''simple docstring'''
@staticmethod
def a__ ( A_ : ArgumentParser ) -> str:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('env' )
download_parser.set_defaults(func=A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = huggingface_hub.__version__
lowerCamelCase_ = 'not installed'
lowerCamelCase_ = 'NA'
if is_torch_available():
import torch
lowerCamelCase_ = torch.__version__
lowerCamelCase_ = torch.cuda.is_available()
lowerCamelCase_ = 'not installed'
if is_transformers_available():
import transformers
lowerCamelCase_ = transformers.__version__
lowerCamelCase_ = 'not installed'
if is_accelerate_available():
import accelerate
lowerCamelCase_ = accelerate.__version__
lowerCamelCase_ = 'not installed'
if is_xformers_available():
import xformers
lowerCamelCase_ = xformers.__version__
lowerCamelCase_ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def a__ ( A_ : Dict ) -> Any:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
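# With the lazy wrapper above, light symbols resolve immediately while the
# torch-guarded branch is only imported on first access -- illustrative usage,
# assuming the standard top-level re-export:
#   from transformers import GPTBigCodeConfig   # cheap, no torch import
#   from transformers import GPTBigCodeModel    # materializes the torch branch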
| 721
|
from __future__ import annotations
from fractions import Fraction
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(lowercase , lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase , lowercase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ):
'''simple docstring'''
lowerCamelCase_ = 1.0
for fraction in fraction_list(lowercase ):
lowerCamelCase_ = Fraction(lowercase )
result *= frac.denominator / frac.numerator
return int(lowercase )
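# Hand-checked example of the predicate above: 49/98 is digit-cancelling
# because dropping the shared 9 leaves 4/8, and 49/98 == 4/8.
#   is_digit_cancelling(49, 98)  # -> True (name as used at the call site above)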
if __name__ == "__main__":
print(solution())
| 651
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCamelCase : List[str] = False
@skip_mps
class A( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = StableDiffusionAttendAndExcitePipeline
UpperCamelCase = False
UpperCamelCase = TEXT_TO_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls : Any ) -> Any:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def a__ ( cls : Optional[int] ) -> int:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
lowerCamelCase_ = CLIPTextModel(UpperCAmelCase__ )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a__ ( self : int , A_ : List[Any] , A_ : Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
if str(UpperCAmelCase__ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase__ )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowerCamelCase_ = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
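# Note on the inputs above (indices assumed, not re-tokenized here):
# 'token_indices' [2, 5] should point at 'cat' and 'frog' in 'a cat and a frog'
# once the BOS token is counted, i.e. the tokens whose cross-attention the
# attend-and-excite updates maximize.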
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = '''cpu'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase__ )
lowerCamelCase_ = pipe(**UpperCAmelCase__ ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase_ = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def a__ ( self : str ) -> str:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class A( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ ( cls : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def a__ ( cls : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = torch.manual_seed(51 )
lowerCamelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to('cuda' )
lowerCamelCase_ = '''a painting of an elephant with glasses'''
lowerCamelCase_ = [5, 7]
lowerCamelCase_ = pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 700
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
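# Worked example of the shortest-edge branch above: with the default
# size = {'shortest_edge': 224}, the intermediate edge is
# int((256 / 224) * 224) == 256, so the image is resized to a 256-pixel short
# side here and center-cropped back down to 224 x 224 later in preprocess.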
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
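# Minimal usage sketch (the instance name and zero image are placeholders, and
# this assumes the upstream BaseImageProcessor routes __call__ to preprocess):
#   import numpy as np
#   batch = image_processor(np.zeros((3, 300, 400), dtype=np.uint8), return_tensors='np')
#   batch['pixel_values'].shape  # -> (1, 3, 224, 224) with the defaults above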
| 651
| 0
|
from __future__ import annotations
lowerCamelCase : str = tuple[int, int, int]
lowerCamelCase : List[str] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCamelCase : Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCamelCase : Tuple = "EGZWVONAHDCLFQMSIPJBYUKXTR"
lowerCamelCase : Dict = "FOBHMDKEXQNRAULPGSJVTYICZW"
lowerCamelCase : Optional[Any] = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
lowerCamelCase : List[str] = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
lowerCamelCase : int = "RMDJXFUWGISLHVTCQNKYPBEZOA"
lowerCamelCase : str = "SGLCPQWZHKXAREONTFBVIYJUDM"
lowerCamelCase : Any = "HVSICLTYKQUBXDWAJZOMFGPREN"
lowerCamelCase : Tuple = "RZWQHFMVDBKICJLNTUXAGYPSOE"
lowerCamelCase : str = "LFKIJODBEGAMQPXVUHYSTCZRWN"
lowerCamelCase : Union[str, Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : Dict ):
'''simple docstring'''
if (unique_rotsel := len(set(_lowerCAmelCase ) )) < 3:
lowerCamelCase_ = f"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_lowerCAmelCase )
# Checks if rotor positions are valid
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = rotpos
if not 0 < rotorposa <= len(_lowerCAmelCase ):
lowerCamelCase_ = f"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
lowerCamelCase_ = f"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowerCAmelCase )
if not 0 < rotorposa <= len(_lowerCAmelCase ):
lowerCamelCase_ = f"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowerCAmelCase )
# Validates string and returns dict
lowerCamelCase_ = _plugboard(_lowerCAmelCase )
return rotpos, rotsel, pbdict
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase_ = f"""Plugboard setting isn't type string ({type(_lowerCAmelCase )})"""
raise TypeError(_lowerCAmelCase )
elif len(_lowerCAmelCase ) % 2 != 0:
lowerCamelCase_ = f"""Odd number of symbols ({len(_lowerCAmelCase )})"""
raise Exception(_lowerCAmelCase )
elif pbstring == "":
return {}
lowerCamelCase_ = pbstring.replace(' ' , '' )  # keep the stripped string; str.replace returns a new string rather than mutating
# Checks if all characters are unique
lowerCamelCase_ = set()
for i in pbstring:
if i not in abc:
lowerCamelCase_ = f"""'{i}' not in list of symbols"""
raise Exception(_lowerCAmelCase )
elif i in tmppbl:
lowerCamelCase_ = f"""Duplicate symbol ({i})"""
raise Exception(_lowerCAmelCase )
else:
tmppbl.add(_lowerCAmelCase )
del tmppbl
# Created the dictionary
lowerCamelCase_ = {}
for j in range(0 , len(_lowerCAmelCase ) - 1 , 2 ):
lowerCamelCase_ = pbstring[j + 1]
lowerCamelCase_ = pbstring[j]
return pb
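# Illustrative plugboard expansion (letters chosen arbitrarily):
#   _plugboard('ABCD')  # -> {'A': 'B', 'B': 'A', 'C': 'D', 'D': 'C'}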
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Dict , lowercase : Dict = (rotora, rotora, rotora) , lowercase : List[str] = "" , ):
'''simple docstring'''
lowerCamelCase_ = text.upper()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = _validator(
_lowerCAmelCase , _lowerCAmelCase , plugb.upper() )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = rotor_position
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCamelCase_ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCamelCase_ = plugboard[symbol]
# rotor ra --------------------------
lowerCamelCase_ = abc.index(_lowerCAmelCase ) + rotorposa
lowerCamelCase_ = rotora[index % len(_lowerCAmelCase )]
# rotor rb --------------------------
lowerCamelCase_ = abc.index(_lowerCAmelCase ) + rotorposa
lowerCamelCase_ = rotora[index % len(_lowerCAmelCase )]
# rotor rc --------------------------
lowerCamelCase_ = abc.index(_lowerCAmelCase ) + rotorposa
lowerCamelCase_ = rotora[index % len(_lowerCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCamelCase_ = reflector[symbol]
# 2nd rotors
lowerCamelCase_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
lowerCamelCase_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
lowerCamelCase_ = abc[rotora.index(_lowerCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCamelCase_ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
lowerCamelCase_ = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
lowerCamelCase_ = 0
rotorposa += 1
if rotorposa >= len(_lowerCAmelCase ):
lowerCamelCase_ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = "This is my Python script that emulates the Enigma machine from WWII."
lowerCamelCase : List[Any] = (1, 1, 1)
lowerCamelCase : List[str] = "pictures"
lowerCamelCase : Any = (rotora, rotora, rotora)
lowerCamelCase : int = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 701
|
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
lowerCamelCase_ = self.k  # use the validated k from the constructor instead of re-hardcoding 0.04
lowerCamelCase_ = self.window_size // 2
for y in range(A_ , h - offset ):
for x in range(A_ , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
lowerCamelCase_ = det - k * (trace**2)
# Corner-response threshold; 0.5 is an arbitrary cut-off that can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
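# The per-window response computed above is the standard Harris measure
# R = det(M) - k * trace(M)^2, where M sums Ix^2, Iy^2 and Ix*Iy over the
# window; pixels with R above the threshold are recorded in corner_list and
# highlighted in the output image.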
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 651
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCamelCase : Any = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCamelCase : Any = dataset.iloc[:, 1:2].values
lowerCamelCase : int = dataset.iloc[:, 2].values
lowerCamelCase : Union[str, Any] = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCamelCase : str = PolynomialFeatures(degree=4)
lowerCamelCase : Optional[Any] = poly_reg.fit_transform(X)
lowerCamelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
plt.scatter(lowerCamelCase_ , lowerCamelCase_ , color='red' )
plt.plot(lowerCamelCase_ , pol_reg.predict(poly_reg.fit_transform(lowerCamelCase_ ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 702
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
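# Illustrative behaviour of the mapping above: printable bytes map to
# themselves (65 -> 'A'), while non-printable bytes are shifted into the
# range starting at 256, so every byte has a printable, reversible stand-in
# for byte-level BPE.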
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
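# Example of the pair extraction above (tuple chosen arbitrarily):
#   get_pairs(('h', 'e', 'l', 'l', 'o'))
#   # -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}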
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 651
| 0
|
import argparse
import datetime
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
lowerCamelCase_ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(UpperCamelCase__ ) < 11:
raise ValueError('Must be 10 characters long' )
# Get month
lowerCamelCase_ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
lowerCamelCase_ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
lowerCamelCase_ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
lowerCamelCase_ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
lowerCamelCase_ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
lowerCamelCase_ = datetime.date(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , int(UpperCamelCase__ ) )
# Start math
if m <= 2:
lowerCamelCase_ = y - 1
lowerCamelCase_ = m + 12
# maths var
lowerCamelCase_ = int(str(UpperCamelCase__ )[:2] )
lowerCamelCase_ = int(str(UpperCamelCase__ )[2:] )
lowerCamelCase_ = int(2.6 * m - 5.39 )
lowerCamelCase_ = int(c / 4 )
lowerCamelCase_ = int(k / 4 )
lowerCamelCase_ = int(d + k )
lowerCamelCase_ = int(t + u + v + x )
lowerCamelCase_ = int(z - (2 * c) )
lowerCamelCase_ = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
lowerCamelCase_ = f"""Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!"""
return response
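# Hand-checked walk-through for '01-31-2010': January shifts to m=13, y=2009,
# so c=20, k=9, t=28, u=5, v=2, x=40, giving z=75, w=35 and f=0 -- a Sunday,
# which matches datetime.date(2010, 1, 31) via the weekday lookup table.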
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
lowerCamelCase : List[Any] = parser.parse_args()
zeller(args.date_input)
| 703
|
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowercase , lowercase )
operand_stack.push(lowercase )
# RULE 5
return operand_stack.peek()
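# Trace for the sub-expression '(5 + 3)': rule 1 pushes 5 and 3, rule 2 pushes
# '+', and the ')' fires rule 4, popping 3, 5 and '+' to push 5 + 3 = 8.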
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 651
| 0
|
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ):
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__SCREAMING_SNAKE_CASE )
if number < 1:
lowerCamelCase_ = f"""Input value of [number={number}] must be > 0"""
raise ValueError(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 1
for i in range(1 , __SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
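# The loop applies the Catalan recurrence C(i) = C(i-1) * (4i - 2) // (i + 1),
# so an input of 5 walks 1 -> 1 -> 2 -> 5 -> 14 and the function returns 14.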
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
lowerCamelCase_ = len(lowercase )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase_ = 0
print(lowercase , end=',' )
# Consider rest of the activities
for j in range(lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase , end=',' )
lowerCamelCase_ = j
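# With the sample data below, the greedy scan keeps activities 0, 1, 3 and 4:
# each newly selected start time (3, 5, 8) is >= the previous finish (2, 4, 7).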
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5]
lowerCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 651
| 0
|
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Optional[Any] ):
'''simple docstring'''
if b == 0:
return (1, 0)
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowercase , a % b )
lowerCamelCase_ = a // b
return (y, x - k * y)
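# Hand-checked example: extended_euclid(10, 6) returns (-1, 2), and indeed
# 10 * (-1) + 6 * 2 == 2 == gcd(10, 6).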
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : int ):
'''simple docstring'''
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowercase , lowercase )
lowerCamelCase_ = na * na
lowerCamelCase_ = ra * x * na + ra * y * na
return (n % m + m) % m
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowercase , lowercase )
if b < 0:
lowerCamelCase_ = (b % n + n) % n
return b
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
lowerCamelCase_ = na * na
lowerCamelCase_ = ra * x * na + ra * y * na
return (n % m + m) % m
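# Hand-checked example for the two solvers above: residues 1 (mod 5) and
# 3 (mod 7) yield 31, since 31 % 5 == 1 and 31 % 7 == 3.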
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 705
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
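# Illustrative numbers for the assertions above (tester defaults assumed):
# image_size=32 and patch_size=2 give num_patches = (32 // 2) * (32 // 2) = 256,
# so the first hidden state has shape (batch_size, 256, embed_dim=16).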
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
        lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
| 651
| 0
|
import argparse
import struct
import unittest
class SHAaaa:
'''simple docstring'''
    def __init__( self , data: bytes ) -> None:
"""simple docstring"""
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data: bytes ) -> bytes:
"""simple docstring"""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ) -> None:
"""simple docstring"""
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_00_00_00_00
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0XFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_00_00_00_00
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_00_00_00_00
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value: int , rotations: int ) -> int:
"""simple docstring"""
return 0XFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class A( unittest.TestCase ):
'''simple docstring'''
    def test_match_hashes( self ) -> None:
"""simple docstring"""
import hashlib
        data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main():
'''simple docstring'''
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertEqual(f.read() , expected )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
| 651
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class OnnxExportTestCase( unittest.TestCase ):
'''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
from transformers import BertModel
lowerCamelCase_ = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(A_ ) )
vocab_file.flush()
lowerCamelCase_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase_ = BertModel(BertConfig(vocab_size=len(A_ ) ) )
model.save_pretrained(A_ )
self._test_export(A_ , 'pt' , 12 , A_ )
@require_tf
@slow
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            model_path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(model_path ) )
# Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(model_path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ ( self : Dict ) -> str:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            model_path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(model_path )
# Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(model_path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(model , tokenizer , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase_ = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase_ = ensure_valid_input(FuncContiguousArgs() , A_ , A_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(A_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(A_ ) , set(A_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(A_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase_ = ensure_valid_input(FuncNonContiguousArgs() , A_ , A_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(A_ ) , 1 )
self.assertEqual(len(A_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def a__ ( self : Any ) -> str:
"""simple docstring"""
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 708
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , lista , listb , tol ):
        """simple docstring"""
        self.assertEqual(len(lista ) , len(listb ) )
        for a, b in zip(lista , listb ):
            self.assertAlmostEqual(a , b , delta=tol )
def a__ ( self : int ) -> str:
"""simple docstring"""
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
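        # A step whose gradient list has a different length than the first step
        # must fail: the accumulator's shape is fixed by the first call.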
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        context._context = None
ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
        def accumulate(grada , gradb ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grada )
                local_variables[1].assign(gradb )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grada , gradb ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grada , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , gradb , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 651
| 0
|
import functools
from typing import Any
def word_break ( string : str , words : list[str] ):
    '''simple docstring'''
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be not empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie
    trie = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
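# Quick sanity checks (hypothetical inputs, runnable by hand):
#   word_break('applepenapple', ['apple', 'pen'])  -> True
#   word_break('catsandog', ['cats', 'dog', 'sand', 'and', 'cat'])  -> False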
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
    canny_array = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
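    # A 3x3 Laplacian-style kernel: convolving it with the grayscale image
    # should yield at least one non-zero response.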
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    assert med.median_filter(gray , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    grad, theta = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
    burkes = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
    nn = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
assert lbp_image.any()
| 651
| 0
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCamelCase : List[str] = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
lowerCamelCase : Any = logging.WARNING
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return __name__.split('.' )[0]
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _SCREAMING_SNAKE_CASE ( name: Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity ( lowercase ):
'''simple docstring'''
_get_library_root_logger().setLevel(lowercase )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    return set_verbosity(INFO )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    return set_verbosity(WARNING )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    return set_verbosity(DEBUG )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    return set_verbosity(ERROR )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    _get_library_root_logger().propagate = False
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
'''simple docstring'''
    def __init__( self , *args , **kwargs ): # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self ):
"""simple docstring"""
return iter(self._iterator )
    def __getattr__( self , name ):
"""simple docstring"""
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
return
return empty_fn
    def __enter__( self ):
"""simple docstring"""
return self
    def __exit__( self , type_ , value , traceback ):
"""simple docstring"""
return
_tqdm_active = True
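# Module-wide flag read by the tqdm wrapper class below and toggled by the
# enable/disable progress-bar helpers at the bottom of this file.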
class _tqdm_cls:
'''simple docstring'''
    def __call__( self , *args , disable=False , **kwargs ):
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCamelCase : Union[str, Any] = _tqdm_cls()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
global _tqdm_active
    _tqdm_active = True
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
global _tqdm_active
    _tqdm_active = False
| 710
|
class Graph:
    '''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        """simple docstring"""
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )
    def get_edges( self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
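# Disjoint-set (union-find) structure with path compression and union by rank,
# used by the Boruvka minimum-spanning-tree routine below.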
class UnionFind:
    '''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self.parent = {}
        self.rank = {}
    def __len__( self ):
        """simple docstring"""
        return len(self.parent )
    def make_set( self , item ):
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self , itema , itemb ):
        """simple docstring"""
        roota = self.find(itema )
        rootb = self.find(itemb )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            self.parent[rootb] = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            self.parent[roota] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota
        return None
    @staticmethod
    def boruvka( graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
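# Sketch of how the pieces fit together in this flattened layout (hypothetical input):
#   g = Graph.build(vertices=[1, 2, 3], edges=[[1, 2, 1], [2, 3, 2], [1, 3, 3]])
#   g.distinct_weight()  # Boruvka assumes pairwise-distinct edge weights
#   mst = UnionFind.boruvka(g)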
| 651
| 0
|
def pancake_sort ( arr : list ):
    '''simple docstring'''
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
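# Example (hypothetical): pancake_sort([3, 1, 5, 2]) returns [1, 2, 3, 5] by
# repeatedly flipping the largest unsorted element to the front, then into place.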
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 711
|
def solution():
    '''simple docstring'''
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
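# Project Euler 48: the last ten digits of 1**1 + 2**2 + ... + 1000**1000.
# Python's arbitrary-precision integers make the direct summation feasible.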
if __name__ == "__main__":
print(solution())
| 651
| 0
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param ( torch_layer , weight , bias=None ):
    '''simple docstring'''
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh ( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local ( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch ( weights , torch_block , hidden_size ):
    '''simple docstring'''
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_b_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_b_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_b_weight ) , torch.tensor(layer_norm_b_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch ( weights , torch_model , hidden_size ):
    '''simple docstring'''
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch ( trax_model_pkl_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCamelCase : Tuple = ["bert-base-uncased", "bert-base-cased"]
lowerCamelCase : Union[str, Any] = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave( tf.keras.Model ):
'''simple docstring'''
        def __init__( self , tokenizer ):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )
        def call( self , inputs ):
            """simple docstring"""
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest' )
                tf_outputs = tf_tokenizer(test_inputs )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.function(_UpperCAmelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tf.constant(_UpperCAmelCase )
lowerCamelCase_ = compiled_tokenizer(_UpperCAmelCase )
lowerCamelCase_ = tf_tokenizer(_UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = ModelToSave(tokenizer=_UpperCAmelCase )
lowerCamelCase_ = tf.convert_to_tensor(self.test_sentences )
lowerCamelCase_ = model(_UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ = Path(_UpperCAmelCase ) / '''saved.model'''
model.save(_UpperCAmelCase )
lowerCamelCase_ = tf.keras.models.load_model(_UpperCAmelCase )
lowerCamelCase_ = loaded_model(_UpperCAmelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
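# --- Added illustration (not part of the original test file) ---
# Hedged sketch of why an in-graph tokenizer matters: unlike the Python
# tokenizer, TFBertTokenizer can run inside tf.data or a SavedModel, so raw
# strings flow straight through the graph. The checkpoint and sentences below
# are illustrative only.
def _example_in_graph_tokenization():
    tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
    dataset = tf.data.Dataset.from_tensor_slices(["first example", "second example"])
    # Batch first, then tokenize inside the graph; each element becomes a dict
    # of integer tensors (input_ids, attention_mask, token_type_ids).
    return dataset.batch(2).map(tokenizer)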
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
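# --- Added illustration (not part of the original metric) ---
# Minimal sketch of how `evaluate` above derives the CoNLL score: it is just the
# mean of the MUC, B-cubed and CEAFe F1 values, scaled to a percentage. The
# component scores below are hypothetical.
muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.75, 0.70
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
print(f"CoNLL score: {conll_score:.2f}")  # -> CoNLL score: 75.00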
| 651
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
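# --- Added illustration (not part of the original pipeline file) ---
# Hedged usage sketch for SpeechToImagePipeline: the checkpoint names and the
# `custom_pipeline` identifier below are assumptions for illustration, not
# values confirmed by this file.
def _example_speech_to_image():
    from datasets import load_dataset
    from diffusers import DiffusionPipeline

    audio = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",  # hypothetical Stable Diffusion base checkpoint
        custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
    )
    return pipe(audio=audio["array"], sampling_rate=audio["sampling_rate"]).images[0]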
| 714
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
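# --- Added illustration (not part of the original module) ---
# Minimal sketch: `column_mapping` tells `datasets` how to rename a user-provided
# column to the canonical "text" column this task expects. The column name
# "content" below is purely illustrative.
template = LanguageModeling(text_column="content")
print(template.column_mapping)  # -> {'content': 'text'}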
| 651
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : int = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
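# --- Added illustration (not part of the original config file) ---
# Sketch of the sparse-layer spacing computed in __init__ above: with 12 encoder
# layers and 3 sparse layers, every 4th block is a sparse (MoE) block. Which
# exact blocks become sparse is decided by the modeling code; this only shows
# the spacing arithmetic.
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers  # -> 4
layout = ["sparse" if (i + 1) % encoder_sparse_step == 0 else "dense" for i in range(num_layers)]
print(encoder_sparse_step, layout.count("sparse"))  # -> 4 3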
| 715
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
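# --- Added illustration (not part of the original test file) ---
# Hedged sketch of the registration flow the tests above exercise: the config
# class is bound to a model_type string, and each TFAutoModel* class then maps
# that config class to a model class. The dimensions are illustrative, and
# registering the same names twice raises an error, as tested above.
def _example_register_new_model():
    AutoConfig.register("new-model", NewModelConfig)
    TFAutoModel.register(NewModelConfig, TFNewModel)
    config = NewModelConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37)
    return TFAutoModel.from_config(config)  # -> a TFNewModel instance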
| 651
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_to_dict_save_load_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
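# --- Added illustration (not part of the original test file) ---
# Minimal hedged sketch of the extractor under test: raw mono audio in, log-mel
# spectrogram patches out, shaped (batch, channels, time_frames, feature_size).
# The input below is random noise, so the output values are meaningless; only
# the shape is of interest.
def _example_tvlt_features():
    extractor = TvltFeatureExtractor()
    fake_audio = np.random.randn(44_100)  # one second of noise at 44.1 kHz
    features = extractor(fake_audio, sampling_rate=44_100, return_tensors="np")
    return features.audio_values.shape  # e.g. (1, 1, time_frames, 128)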
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
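# --- Added illustration (not part of the original config file) ---
# Hedged sketch: the defaults above describe the released 2.7B model; the tiny
# values here are illustrative for quick experiments only.
tiny_config = GPTNeoXJapaneseConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
print(tiny_config.model_type, tiny_config.rotary_emb_base)  # -> gpt_neox_japanese 10000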
| 651
| 0
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
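# --- Added illustration (not part of the original module) ---
# Hedged usage sketch for SquadDataset: the data directory and tokenizer are
# illustrative; `data_dir` would need to contain the SQuAD train/dev .json files.
def _example_squad_loader():
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")  # hypothetical path
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = SquadDataset(args, tokenizer, mode="train")
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    return next(iter(loader))["input_ids"].shape  # -> torch.Size([8, 128]) with the default max_seq_length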
| 717
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_f1'] , 30 )
            self.assertGreaterEqual(result['eval_exact'] , 30 )
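# Each test above patches sys.argv, invokes the example script's main(), and then
# smoke-checks the metrics JSON written by the run; the thresholds are loose on
# purpose since these runs use tiny fixtures, not converged models.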
| 651
| 0
|
import argparse
from collections import defaultdict
def overwrite_file ( file , class_name , test_name , correct_line , done_test ):
    '''simple docstring'''
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
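# The scan above walks the test file line by line, tracking whether it is inside
# the target class, then the target test, and swaps the count-th matching
# statement (counted via done_test) for correct_line at the original indentation.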
def main ( correct , fail=None ):
    '''simple docstring'''
    if fail is not None:
        with open(fail , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
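# Expected line format in --correct_filename, inferred from the split(';') above:
#   path/to/test_file.py;TestClassName;test_name;corrected statement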
| 718
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCamelCase : str = namedtuple("CoinsDistribResult", "moves excess")
def _SCREAMING_SNAKE_CASE ( lowercase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
    def count_nodes(node : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase ) != count_coins(lowercase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
return get_distrib(lowercase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
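# Minimal usage sketch for the entry point above: a root holding all three coins
# needs one move per child, so the minimum number of moves is 2.
# tree = TreeNode(3, TreeNode(0), TreeNode(0))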
| 651
| 0
|
lowerCamelCase : Optional[int] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
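# Each try/except/else block below follows the same optional-dependency pattern:
# probe for a backend, fall back to dummy placeholder objects when it is missing,
# and otherwise re-export the real classes so `import diffusers` never fails.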
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 719
|
from manim import *
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
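# The animation moves a single input square across the model blocks; at each step
# the active layer's weights hop from CPU to GPU and back, illustrating
# accelerate-style disk/CPU offload of a model too large for GPU memory.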
| 651
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class A( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
        self.tool = load_tool('text-question-answering' )
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
        result = self.tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        result = self.remote_tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        result = self.tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        result = self.remote_tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
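# The four tests exercise positional and keyword call forms against both the
# local tool and its remote endpoint; all are expected to return the same span.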
| 720
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
    return A()
class A( UpperCamelCase ):
'''simple docstring'''
@staticmethod
    def a__ ( parser : ArgumentParser ) -> str:
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
return info
@staticmethod
    def a__ ( d : Dict ) -> Any:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
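# Typical invocation (assuming the standard diffusers CLI wiring):
#   diffusers-cli env
# which prints the info dict above as "- key: value" lines via format_dict.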
| 651
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase : int = False
class A( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a__ ( self : int ) -> int:
"""simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
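# The first test checks save/load round-trip determinism by reseeding the
# generator; the second compares a 50-step fp16 run against a stored image slice.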
| 721
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling ( num : int , den : int ):
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list ( digit_len : int ):
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution ( digit_len : int = 2 ):
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
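# Worked check: the four non-trivial curious fractions are 16/64, 19/95, 26/65
# and 49/98; their product is 1/100, so solution() returns the denominator 100.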
| 651
| 0
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class A:
'''simple docstring'''
def __init__( self : Optional[int] , A_ : List[str] , A_ : Optional[int]=3 , A_ : List[str]=7 , A_ : Dict=True , A_ : Tuple=True , A_ : List[Any]=False , A_ : Tuple=True , A_ : Union[str, Any]=99 , A_ : List[str]=32 , A_ : List[str]=5 , A_ : Tuple=4 , A_ : Union[str, Any]=37 , A_ : Optional[int]="gelu" , A_ : str=0.1 , A_ : List[str]=0.1 , A_ : Tuple=512 , A_ : Any=16 , A_ : int=2 , A_ : List[Any]=0.02 , A_ : int=3 , A_ : int=4 , A_ : List[str]=None , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , )
def a__ ( self : Optional[int] , A_ : str , A_ : str , A_ : str , A_ : List[str] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = FalconModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Any , A_ : Optional[int] , A_ : Any , A_ : List[str] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Tuple , ) -> Any:
"""simple docstring"""
lowerCamelCase_ = True
lowerCamelCase_ = FalconModel(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
lowerCamelCase_ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
lowerCamelCase_ = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Optional[int] , A_ : Dict , A_ : Any , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : str , A_ : Optional[Any] , A_ : Dict , A_ : List[str] , A_ : Union[str, Any] , ) -> int:
"""simple docstring"""
lowerCamelCase_ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Tuple , A_ : str , A_ : Any , A_ : List[Any] , A_ : List[str] , A_ : Dict , A_ : str , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
lowerCamelCase_ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
lowerCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase_ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0]
lowerCamelCase_ = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else ()
UpperCamelCase = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FalconModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def a__ ( self : Any ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(A_ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = 'single_label_classification'
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(A_ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = FalconForCausalLM(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , use_cache=A_ )
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = model._convert_to_rw_cache(result.past_key_values )
lowerCamelCase_ = model._convert_cache_to_standard_format(A_ , A_ )
for layer in range(len(A_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = 'multi_label_classification'
lowerCamelCase_ = input_dict['input_ids']
lowerCamelCase_ = input_ids.ne(1 ).to(A_ )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , 'use_cache' ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs['use_cache'] = True
            outputs = model(**inputs )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
            num_hidden_layers = (
                getattr(config , 'decoder_layers' , None )
                or getattr(config , 'num_decoder_layers' , None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , 'num_kv_heads' , config.num_attention_heads )
            embed_dim = getattr(config , 'd_model' , config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['past_key_values']
            self.assertEqual(len(past_kv ) , num_hidden_layers )
            batch_size , seq_length = inputs['input_ids'].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCamelCase_ = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(A_ )
lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ )
lowerCamelCase_ = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 )
lowerCamelCase_ = tokenizer.batch_decode(A_ )[0]
self.assertEqual(A_ , A_ )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(A_ )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(A_ )
lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , num_beams=2 , max_new_tokens=4 )
@slow
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(A_ )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(device=A_ )
lowerCamelCase_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(A_ )
# Test results are the same with and without cache
lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
lowerCamelCase_ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
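# The cache test above round-trips past_key_values through Falcon's legacy "rw"
# layout (3-D tensors) and back to the standard 4-D layout, asserting the
# conversion is lossless.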
| 700
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
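# Preprocessing pipeline in order: to_numpy_array -> resize (shortest edge scaled
# by 256/224) -> center crop to 224x224 -> rescale by 1/255 -> normalize with
# ImageNet statistics -> convert to channels-first.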
| 651
| 0
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , *A_ : Dict , **A_ : Union[str, Any] ) -> None:
"""simple docstring"""
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , A_ , )
super().__init__(*A_ , **A_ )
| 701
|
import cva
import numpy as np
class HarrisCorner:
'''simple docstring'''
    def __init__( self : int , k : float , window_size : int ) -> List[Any]:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
    def detect ( self : Any , img_path : str ) -> tuple[cva.Mat, list[list[int]]]:
        """simple docstring"""
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
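# Note: detect() recomputes k locally as 0.04 (shadowing self.k) and uses a fixed
# response threshold of 0.5; detected corners are painted red in the returned image.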
| 651
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch ( xmod_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
    '''simple docstring'''
    data_dir = Path('data_bin' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(data_dir ) , bpe='sentencepiece' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval()  # disable dropout
    print(xmod_checkpoint_path )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowerCamelCase_ = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowerCamelCase_ = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
lowerCamelCase_ = xmod_layer.fca.weight
lowerCamelCase_ = xmod_layer.fca.bias
# output
        bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
lowerCamelCase_ = xmod_layer.fca.weight
lowerCamelCase_ = xmod_layer.fca.bias
lowerCamelCase_ = xmod_layer.final_layer_norm.weight
lowerCamelCase_ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ = from_adapter.fca.weight
lowerCamelCase_ = from_adapter.fca.bias
lowerCamelCase_ = from_adapter.fca.weight
lowerCamelCase_ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ = xmod.model.classification_heads['mnli'].dense.weight
lowerCamelCase_ = xmod.model.classification_heads['mnli'].dense.bias
lowerCamelCase_ = xmod.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase_ = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase_ = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = xmod.model.encoder.lm_head.weight
lowerCamelCase_ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
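    # Example invocation (illustrative: the checkpoint path is a placeholder and the
    # script filename is assumed from the usual transformers conversion-script naming):
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path /path/to/xmod.base.81.1M \
    #       --pytorch_dump_folder_path ./xmod-base-pytorch \
    #       --classification_head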
| 702
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
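# Illustrative mapping behaviour (upstream this helper is `bytes_to_unicode`; the
# name is obfuscated above): printable ASCII maps to itself, while bytes excluded
# from `bs` are shifted into the chr(256 + n) range — e.g. the space byte 32 maps
# to 'Ġ' (chr(288)) — so every byte value gets a printable stand-in character.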
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
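# Illustrative example (upstream this helper is `get_pairs`): it returns the set of
# adjacent symbol pairs in a word, e.g.
#   get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}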
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
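    # Illustrative BPE merge trace (hypothetical rank table, not the real merges file):
    # with a rank table {('l', 'o'): 0, ('lo', 'w'): 1}, this method (upstream: `bpe`)
    # rewrites ('l', 'o', 'w') -> ('lo', 'w') -> ('low',), caches the result per
    # token, and returns the space-joined string 'low'.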
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : A_[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
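    # The merges file written above uses the standard BPE format (illustrative excerpt):
    #   #version: 0.2
    #   Ġ t
    #   Ġ a
    # one space-separated merge rule per line, ordered by merge rank.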
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space here, as Blenderbot itself does
inputs.append(' ' + text )
else:
                # Generated responses already contain the leading space.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
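    # Usage sketch (illustrative; the class is obfuscated to `A` in this dump but is
    # the Blenderbot tokenizer in transformers):
    #   tok = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
    #   tok('Hello world')['input_ids']   # byte-level BPE ids, with EOS appended by
    #                                     # build_inputs_with_special_tokens above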
| 651
| 0
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = jax.device_count()
lowerCamelCase_ = num_samples * [prompt]
lowerCamelCase_ = sd_pipe.prepare_inputs(A_ )
lowerCamelCase_ = replicate(A_ )
lowerCamelCase_ = shard(A_ )
lowerCamelCase_ = jax.random.PRNGKey(0 )
lowerCamelCase_ = jax.random.split(A_ , jax.device_count() )
lowerCamelCase_ = sd_pipe(A_ , A_ , A_ , num_inference_steps=25 , jit=A_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ = images[0, 253:256, 253:256, -1]
lowerCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = 'stabilityai/stable-diffusion-2'
lowerCamelCase_ , lowerCamelCase_ = FlaxDPMSolverMultistepScheduler.from_pretrained(A_ , subfolder='scheduler' )
lowerCamelCase_ , lowerCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
A_ , scheduler=A_ , revision='bf16' , dtype=jnp.bfloataa , )
lowerCamelCase_ = scheduler_params
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = jax.device_count()
lowerCamelCase_ = num_samples * [prompt]
lowerCamelCase_ = sd_pipe.prepare_inputs(A_ )
lowerCamelCase_ = replicate(A_ )
lowerCamelCase_ = shard(A_ )
lowerCamelCase_ = jax.random.PRNGKey(0 )
lowerCamelCase_ = jax.random.split(A_ , jax.device_count() )
lowerCamelCase_ = sd_pipe(A_ , A_ , A_ , num_inference_steps=25 , jit=A_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase_ = images[0, 253:256, 253:256, -1]
lowerCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase_ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
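    # Note (illustrative): `replicate` copies the pipeline params to every device and
    # `shard` splits the prompt ids across devices, so calling the pipeline with
    # jit=True runs the denoising loop pmap-ed over all jax.device_count() devices.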
| 703
|
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    '''
    Evaluate a fully parenthesised arithmetic expression of single-digit
    operands using Dijkstra's two-stack algorithm.
    '''
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands,
            # apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: '(' is simply ignored
    # RULE 5: the final value sits on top of the operand stack
    return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 651
| 0
|
def perfect_cube(n: int) -> bool:
    '''Return True if `n` is a perfect cube.'''
    # round() sidesteps float error: 27 ** (1 / 3) evaluates to 3.0000000000000004
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 704
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    '''
    Greedily print a maximum-size set of non-overlapping activities.
    Assumes `finish` is sorted in non-decreasing order.
    Example: start=[1, 3, 0, 5, 8, 5], finish=[2, 4, 6, 7, 9, 9]
    selects activities 0, 1, 3 and 4.
    '''
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider the rest of the activities
    for j in range(n):
        # Select activity j if it starts at or after the finish
        # time of the previously selected activity
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 651
| 0
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCamelCase = None
lowerCamelCase = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCamelCase = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = '''PIL.Image.Image'''
UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCamelCase = field(default='''Image''' , init=UpperCamelCase , repr=UpperCamelCase )
def __call__( self : str ) -> Optional[int]:
"""simple docstring"""
return self.pa_type
def a__ ( self : Optional[Any] , A_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(A_ , A_ ):
lowerCamelCase_ = np.array(A_ )
if isinstance(A_ , A_ ):
return {"path": value, "bytes": None}
elif isinstance(A_ , A_ ):
return {"path": None, "bytes": value}
elif isinstance(A_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(A_ )
elif isinstance(A_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(A_ )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def a__ ( self : Optional[int] , A_ : dict , A_ : Dict=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
lowerCamelCase_ = {}
lowerCamelCase_ , lowerCamelCase_ = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(A_ ):
lowerCamelCase_ = PIL.Image.open(A_ )
else:
lowerCamelCase_ = path.split('::' )[-1]
try:
lowerCamelCase_ = string_to_dict(A_ , config.HUB_DATASETS_URL )['repo_id']
lowerCamelCase_ = token_per_repo_id.get(A_ )
except ValueError:
lowerCamelCase_ = None
with xopen(A_ , 'rb' , use_auth_token=A_ ) as f:
lowerCamelCase_ = BytesIO(f.read() )
lowerCamelCase_ = PIL.Image.open(bytes_ )
else:
lowerCamelCase_ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def a__ ( self : Optional[int] , A_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.binary() )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
lowerCamelCase_ = storage.field('bytes' )
else:
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
lowerCamelCase_ = storage.field('path' )
else:
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCamelCase_ = pa.array(
[encode_np_array(np.array(A_ ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCamelCase_ = pa.array([None] * len(A_ ) , type=pa.string() )
lowerCamelCase_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(A_ , self.pa_type )
def a__ ( self : str , A_ : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(A_ : List[Any] ):
with xopen(A_ , 'rb' ) as f:
lowerCamelCase_ = f.read()
return bytes_
lowerCamelCase_ = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase_ = pa.array(
[os.path.basename(A_ ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
lowerCamelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(A_ , self.pa_type )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCamelCase_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _SCREAMING_SNAKE_CASE ( lowercase : "PIL.Image.Image" ):
'''simple docstring'''
lowerCamelCase_ = BytesIO()
if image.format in list_image_compression_formats():
lowerCamelCase_ = image.format
else:
lowerCamelCase_ = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowercase , format=lowercase )
return buffer.getvalue()
def _SCREAMING_SNAKE_CASE ( lowercase : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(lowercase , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowercase )}
def _SCREAMING_SNAKE_CASE ( lowercase : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
lowerCamelCase_ = array.dtype
lowerCamelCase_ = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
lowerCamelCase_ = dtype.kind
lowerCamelCase_ = dtype.itemsize
lowerCamelCase_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCamelCase_ = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCamelCase_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCamelCase_ = dtype_byteorder + dtype_kind + str(lowercase )
lowerCamelCase_ = np.dtype(lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
lowerCamelCase_ = PIL.Image.fromarray(array.astype(lowercase ) )
return {"path": None, "bytes": image_to_bytes(lowercase )}
def _SCREAMING_SNAKE_CASE ( lowercase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
lowerCamelCase_ , lowerCamelCase_ = first_non_null_value(lowercase )
if isinstance(lowercase , lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowercase , np.ndarray ):
lowerCamelCase_ = no_op_if_value_is_null(lowercase )
return [obj_to_image_dict_func(lowercase ) for obj in objs]
elif isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = no_op_if_value_is_null(lowercase )
return [obj_to_image_dict_func(lowercase ) for obj in objs]
else:
return objs
else:
return objs
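# Illustrative dtype handling in the encode helpers above:
#   - np.uint8 ('|u1') matches _VALID_IMAGE_ARRAY_DTPYES exactly and passes through;
#   - np.int64 is downcast within its kind by halving the itemsize (i8 -> i4),
#     with a warning, before PIL.Image.fromarray is called;
#   - any multi-channel array (array.shape[2:] non-empty) is forced to '|u1'.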
| 705
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCamelCase : List[str] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[Any] ):
'''simple docstring'''
inspect_dataset(lowercase , lowercase )
lowerCamelCase_ = path + '.py'
assert script_name in os.listdir(lowercase )
assert "__pycache__" not in os.listdir(lowercase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
inspect_metric(lowercase , lowercase )
lowerCamelCase_ = path + '.py'
assert script_name in os.listdir(lowercase )
assert "__pycache__" not in os.listdir(lowercase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Any , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = get_dataset_config_info(lowercase , config_name=lowercase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Any , lowercase : Any ):
'''simple docstring'''
with pytest.raises(lowercase ):
get_dataset_config_info(lowercase , config_name=lowercase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = get_dataset_config_names(lowercase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Any , lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = get_dataset_infos(lowercase )
assert list(infos.keys() ) == expected_configs
lowerCamelCase_ = expected_configs[0]
assert expected_config in infos
lowerCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Dict , lowercase : List[Any] ):
'''simple docstring'''
lowerCamelCase_ = get_dataset_infos(lowercase )
assert expected_config in infos
lowerCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
with pytest.raises(lowercase ):
get_dataset_split_names(lowercase , config_name=lowercase )
| 706
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
| 651
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase : List[str] = TypeVar("KEY")
lowerCamelCase : str = TypeVar("VAL")
@dataclass(frozen=UpperCamelCase , slots=UpperCamelCase )
class A( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
class A( _Item ):
'''simple docstring'''
def __init__( self : Tuple ) -> None:
"""simple docstring"""
super().__init__(A_ , A_ )
def __bool__( self : List[Any] ) -> bool:
"""simple docstring"""
return False
lowerCamelCase : Optional[Any] = _DeletedItem()
class A( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : str , A_ : int = 8 , A_ : float = 0.75 ) -> None:
"""simple docstring"""
lowerCamelCase_ = initial_block_size
lowerCamelCase_ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase_ = capacity_factor
lowerCamelCase_ = 0
def a__ ( self : Dict , A_ : KEY ) -> int:
"""simple docstring"""
return hash(A_ ) % len(self._buckets )
def a__ ( self : List[Any] , A_ : int ) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def a__ ( self : Tuple , A_ : int , A_ : KEY , A_ : VAL ) -> bool:
"""simple docstring"""
lowerCamelCase_ = self._buckets[ind]
if not stored:
lowerCamelCase_ = _Item(A_ , A_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase_ = _Item(A_ , A_ )
return True
else:
return False
def a__ ( self : List[str] ) -> bool:
"""simple docstring"""
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(A_ )
def a__ ( self : Dict ) -> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase_ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ ( self : Any , A_ : int ) -> None:
"""simple docstring"""
lowerCamelCase_ = self._buckets
lowerCamelCase_ = [None] * new_size
lowerCamelCase_ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def a__ ( self : Tuple ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def a__ ( self : Any ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def a__ ( self : Tuple , A_ : KEY ) -> Iterator[int]:
"""simple docstring"""
lowerCamelCase_ = self._get_bucket_index(A_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase_ = self._get_next_ind(A_ )
def a__ ( self : int , A_ : KEY , A_ : VAL ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
if self._try_set(A_ , A_ , A_ ):
break
def __setitem__( self : List[str] , A_ : KEY , A_ : VAL ) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(A_ , A_ )
def __delitem__( self : int , A_ : KEY ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
raise KeyError(A_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase_ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Optional[int] , A_ : KEY ) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(A_ ):
lowerCamelCase_ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A_ )
def __len__( self : int ) -> int:
"""simple docstring"""
return self._len
def __iter__( self : List[Any] ) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = ' ,'.join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
"""simple docstring"""
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCamelCase_ = black.format_str(A_ , mode=A_ )
lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(A_ , 'w' , newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , 'r' ) as f:
self.assertTrue(f.read() , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
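# Illustrative note (added): the "# Copied from <module>.<Class>" marker is
# what check_copies.is_copy_consistent() keys on; a copy passes when the
# decorated block matches the referenced source after applying any
# "with Old->New" renames, which is why the cases above pair the marker with
# re.sub-transformed reference code.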
| 651
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : str = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''ibert'''
def __init__( self : List[str] , A_ : Optional[Any]=30522 , A_ : str=768 , A_ : List[str]=12 , A_ : int=12 , A_ : List[str]=3072 , A_ : Tuple="gelu" , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=0.1 , A_ : Any=512 , A_ : int=2 , A_ : Optional[Any]=0.02 , A_ : Optional[Any]=1E-12 , A_ : List[Any]=1 , A_ : Optional[Any]=0 , A_ : List[Any]=2 , A_ : Dict="absolute" , A_ : Any=False , A_ : List[str]="none" , **A_ : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = quant_mode
lowerCamelCase_ = force_dequant
class A( UpperCamelCase ):
'''simple docstring'''
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 708
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
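# Illustrative note (added): a gradient accumulator sums per-call gradients
# until reset, so the first test expects
#   [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] = [-2.0, 5.0]
# after three accumulation steps, and zeros again after `reset()`.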
| 651
| 0
|
from jiwer import compute_measures
import datasets
lowerCamelCase : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCamelCase : Union[str, Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCamelCase : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def a__ ( self : int , A_ : List[str]=None , A_ : Union[str, Any]=None , A_ : Dict=False ) -> Any:
"""simple docstring"""
if concatenate_texts:
return compute_measures(A_ , A_ )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(A_ , A_ ):
lowerCamelCase_ = compute_measures(A_ , A_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
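# Illustrative sketch (added): the iterative branch above computes the
# corpus-level WER = (S + D + I) / (S + D + C) with counts pooled over all
# pairs before dividing, rather than a mean of per-pair WERs. Assuming jiwer
# is installed, a single pair looks like:
# >>> from jiwer import compute_measures
# >>> m = compute_measures("this is the reference", "this is the prediction")
# >>> (m["substitutions"] + m["deletions"] + m["insertions"]) / (
# ...     m["substitutions"] + m["deletions"] + m["hits"])
# 0.25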
| 709
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert that the negative image array contains at least one True value
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert that the generated Gaussian kernel contains no zero entries
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert that every pixel in the grayscale image is non-zero
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert that the Canny output contains at least one edge pixel
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
# Test that the get_neighbors_pixel() function does not return None
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array with the same height and width as the read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
| 651
| 0
|
import math
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 , lowercase , 2 ):
if is_prime[i]:
primes.append(lowercase )
return primes
def _SCREAMING_SNAKE_CASE ( lowercase : int = 99_99_66_66_33_33 ):
'''simple docstring'''
lowerCamelCase_ = math.floor(math.sqrt(lowercase ) ) + 1_00
lowerCamelCase_ = prime_sieve(lowercase )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 710
|
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
edges.sort(key=lambda A_ : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
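# Usage sketch (added; written against the un-obfuscated original `Graph`,
# whose names are mangled above). Boruvka's algorithm repeatedly attaches the
# cheapest edge leaving each component until a single component remains:
# >>> g = Graph.build(vertices=[1, 2, 3, 4],
# ...                 edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)])
# >>> mst = Graph.boruvka_mst(g)  # assumed original name of the staticmethod
# >>> print(mst)  # keeps the two weight-1 edges plus the weight-2 edge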
| 651
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase : Optional[Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Optional[int] = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def a__ ( self : int , A_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def a__ ( self : Dict , A_ : Optional[Any] , A_ : Any , A_ : Tuple=0.9 , A_ : List[Any]=3 , A_ : Optional[Any]=0.5 ) -> Optional[int]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
lowerCamelCase_ = [
meteor_score.single_meteor_score(
word_tokenize(A_ ) , word_tokenize(A_ ) , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
else:
lowerCamelCase_ = [
meteor_score.single_meteor_score(A_ , A_ , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
return {"meteor": np.mean(A_ )}
| 711
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
return str(lowercase )[-10:]
if __name__ == "__main__":
print(solution())
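# Equivalent sketch (added for illustration): three-argument pow keeps every
# term reduced modulo 10**10, avoiding full big-integer powers while giving
# the same last ten digits of 1**1 + 2**2 + ... + 1000**1000.
def _self_powers_mod_sketch() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    # zfill keeps parity with str(full_sum)[-10:] if the window starts with 0
    return str(total).zfill(10)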
| 651
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''sew-d'''
def __init__( self : int , A_ : Optional[int]=32 , A_ : int=768 , A_ : List[str]=12 , A_ : str=12 , A_ : int=3072 , A_ : Tuple=2 , A_ : str=512 , A_ : List[str]=256 , A_ : Any=True , A_ : List[Any]=True , A_ : str=("p2c", "c2p") , A_ : Dict="layer_norm" , A_ : List[str]="gelu_python" , A_ : str=0.1 , A_ : Optional[int]=0.1 , A_ : Optional[Any]=0.1 , A_ : Tuple=0.0 , A_ : Tuple=0.1 , A_ : Union[str, Any]=0.02 , A_ : Dict=1E-7 , A_ : Union[str, Any]=1E-5 , A_ : Any="group" , A_ : Union[str, Any]="gelu" , A_ : Optional[int]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A_ : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A_ : Any=False , A_ : List[str]=128 , A_ : Tuple=16 , A_ : int=True , A_ : Tuple=0.05 , A_ : Any=10 , A_ : int=2 , A_ : Any=0.0 , A_ : int=10 , A_ : List[str]=0 , A_ : Optional[int]="mean" , A_ : str=False , A_ : List[Any]=False , A_ : Union[str, Any]=256 , A_ : Optional[Any]=0 , A_ : List[Any]=1 , A_ : int=2 , **A_ : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = feat_extract_norm
lowerCamelCase_ = feat_extract_activation
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = conv_bias
lowerCamelCase_ = num_conv_pos_embeddings
lowerCamelCase_ = num_conv_pos_embedding_groups
lowerCamelCase_ = len(self.conv_dim )
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = squeeze_factor
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = position_buckets
lowerCamelCase_ = share_att_key
lowerCamelCase_ = relative_attention
lowerCamelCase_ = norm_rel_ebd
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = feat_proj_dropout
lowerCamelCase_ = final_dropout
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = feature_layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
# ctc loss
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# sequence classification
lowerCamelCase_ = use_weighted_layer_sum
lowerCamelCase_ = classifier_proj_size
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
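# Illustrative note (added): the property above multiplies all convolutional
# strides, i.e. the overall downsampling factor of the feature encoder. With
# the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1):
# >>> import functools, operator
# >>> functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)
# 320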
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 651
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase : Optional[int] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase : Any = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
lowerCamelCase : str = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
lowerCamelCase : str = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase : str = np.expand_dims(test_image, axis=0)
lowerCamelCase : str = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCamelCase : Any = "Normal"
if result[0][0] == 1:
lowerCamelCase : str = "Abnormality detected"
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ):
'''simple docstring'''
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if parse_col != "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
| 651
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , A_ : Distribution , A_ : Any=None , A_ : str=None , A_ : Optional[int]=0 ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 1.0 if scale is None else scale
lowerCamelCase_ = 0.0 if loc is None else loc
super().__init__(A_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=A_ )] )
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return self.variance.sqrt()
class A( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , A_ : int , A_ : Dict[str, int] , A_ : Callable[..., Tuple[torch.Tensor]] , **A_ : Any ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = args_dim
lowerCamelCase_ = nn.ModuleList([nn.Linear(A_ , A_ ) for dim in args_dim.values()] )
lowerCamelCase_ = domain_map
def a__ ( self : Any , A_ : torch.Tensor ) -> Tuple[torch.Tensor]:
"""simple docstring"""
lowerCamelCase_ = [proj(A_ ) for proj in self.proj]
return self.domain_map(*A_ )
class A( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , A_ : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = function
def a__ ( self : Dict , A_ : Dict , *A_ : str ) -> Any:
"""simple docstring"""
return self.function(A_ , *A_ )
class A:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self : List[Any] , A_ : int = 1 ) -> None:
"""simple docstring"""
lowerCamelCase_ = dim
lowerCamelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self : Any , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*A_ )
else:
return Independent(self.distribution_class(*A_ ) , 1 )
def a__ ( self : List[Any] , A_ : Tuple , A_ : Optional[torch.Tensor] = None , A_ : Optional[torch.Tensor] = None , ) -> Distribution:
"""simple docstring"""
lowerCamelCase_ = self._base_distribution(A_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(A_ , loc=A_ , scale=A_ , event_dim=self.event_dim )
@property
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def a__ ( self : Tuple ) -> float:
"""simple docstring"""
return 0.0
def a__ ( self : int , A_ : int ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=A_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def a__ ( self : Any , *A_ : torch.Tensor ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def a__ ( A_ : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(A_ ) + 4.0 )) / 2.0
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = {'''df''': 1, '''loc''': 1, '''scale''': 1}
UpperCamelCase = StudentT
@classmethod
def a__ ( cls : Dict , A_ : torch.Tensor , A_ : torch.Tensor , A_ : torch.Tensor ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
lowerCamelCase_ = 2.0 + cls.squareplus(A_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = {'''loc''': 1, '''scale''': 1}
UpperCamelCase = Normal
@classmethod
def a__ ( cls : Any , A_ : torch.Tensor , A_ : torch.Tensor ) -> str:
"""simple docstring"""
lowerCamelCase_ = cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = {'''total_count''': 1, '''logits''': 1}
UpperCamelCase = NegativeBinomial
@classmethod
def a__ ( cls : Optional[int] , A_ : torch.Tensor , A_ : torch.Tensor ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = cls.squareplus(A_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self : List[Any] , A_ : Union[str, Any] ) -> Distribution:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=A_ , logits=A_ )
else:
return Independent(self.distribution_class(total_count=A_ , logits=A_ ) , 1 )
def a__ ( self : Optional[Any] , A_ : Optional[int] , A_ : Optional[torch.Tensor] = None , A_ : Optional[torch.Tensor] = None ) -> Distribution:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
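# Illustrative note (added): the affine wrapper at the top of this file
# implements the standard rules E[aX + b] = a*E[X] + b and
# Var[aX + b] = a**2 * Var[X], e.g. for a Normal base distribution:
# >>> import torch
# >>> from torch.distributions import Normal
# >>> base = Normal(torch.tensor(0.0), torch.tensor(2.0))
# >>> 3.0 * base.mean + 1.0, 9.0 * base.variance  # loc=1.0, scale=3.0
# (tensor(1.), tensor(36.))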
| 714
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
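# Illustrative note (added; field names are mangled above): in the original
# `LanguageModeling` task template this property simply maps the configured
# text column to the canonical name, e.g. `{"content": "text"}` when the
# dataset's column is called "content".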
| 651
| 0
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCamelCase : int = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCamelCase : Optional[int] = 0
lowerCamelCase : List[Any] = 0xe0_00
lowerCamelCase : List[Any] = 0xe0_01
lowerCamelCase : Any = 0xe0_02
lowerCamelCase : Dict = 0xe0_03
lowerCamelCase : Union[str, Any] = 0xe0_04
# Maps special codepoints to human-readable names.
lowerCamelCase : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCamelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A( PreTrainedTokenizer ):
    '''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs ) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def a__ ( self : int ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def a__ ( self : List[str] , A_ : str ) -> List[str]:
"""simple docstring"""
return list(A_ )
    def a__ ( self : int , token : str ) -> int:
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def a__ ( self : List[str] , index : int ) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
def a__ ( self : Tuple , A_ : List[Any] ) -> Any:
"""simple docstring"""
return "".join(A_ )
    def a__ ( self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def a__ ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def a__ ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def a__ ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Any:
        """simple docstring"""
        return ()
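# Hedged demo of the character-level scheme above: tokens are single
# characters and ids are their Unicode codepoints, so encoding is just `ord`
# and decoding is just `chr` (special symbols sit in the Private Use Area).
def _codepoint_demo() -> None:
    text = 'héllo'
    ids = [ord(ch ) for ch in text]  # e.g. [104, 233, 108, 108, 111]
    assert all(0 <= i < UNICODE_VOCAB_SIZE for i in ids )
    assert ''.join(chr(i ) for i in ids ) == text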
| 715
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig ):
    '''simple docstring'''
    model_type = '''new-model'''
if is_tf_available():
    class TFNewModel(TFBertModel ):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertModel )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModelForPreTraining.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertForPreTraining )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TapasConfig )
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name )
            model , loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTapasForQuestionAnswering )
    def a__ ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def a__ ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(model , TFFunnelModel )
        config = copy.deepcopy(model.config )
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config )
        self.assertIsInstance(model , TFFunnelBaseModel )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
            self.assertIsInstance(model , TFFunnelBaseModel )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
        try:
            AutoConfig.register('new-model' , NewModelConfig )
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self ).get_config()
                    config = NewModelConfig(**tiny_config.to_dict() )
                    model = auto_class.from_config(config )
                    self.assertIsInstance(model , TFNewModel )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir )
                        new_model = auto_class.from_pretrained(tmp_dir )
                        self.assertIsInstance(new_model , TFNewModel )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            lowerCamelCase_ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
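# Hedged sketch of the registration round trip exercised above; `MyConfig`
# and `TFMyModel` are hypothetical stand-ins, not real library classes.
# AutoConfig.register("my-model", MyConfig)
# TFAutoModel.register(MyConfig, TFMyModel)
# model = TFAutoModel.from_config(MyConfig())  # resolves to TFMyModel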
| 651
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
debug_launcher(test_script.main )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_ops.main )
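# Hedged note: debug_launcher runs the target in spawned CPU processes; e.g.
# debug_launcher(test_script.main, num_processes=2) would exercise a
# two-process setup (the num_processes keyword is an assumption about the
# accelerate API surface).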
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class A( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
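# Hedged usage sketch (assumes the class above mirrors transformers'
# GPTNeoXJapaneseConfig):
# config = A()              # the defaults shown in __init__ above
# config.vocab_size         # -> 32000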
| 651
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class A( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 100 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
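# Hedged usage sketch (assumes this is a DanceDiffusion-style audio pipeline;
# the checkpoint id is illustrative and may not exist):
# pipe = A.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
# waveform = output.audios[0]  # numpy array of shape (channels, samples)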
| 717
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict="eval" ):
'''simple docstring'''
lowerCamelCase_ = os.path.join(lowercase , f"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
raise ValueError(f"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( TestCasePlus ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 0
|
import requests
lowerCamelCase : List[Any] = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
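# Illustrative payload shape (an assumption about the News API response; only
# the "articles" list with "title" keys is relied on above):
# {"articles": [{"title": "Headline one", ...}, {"title": "Headline two", ...}]}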
| 718
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The number of nodes should be the same as the number of coins' )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
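# Worked example: a root holding 3 coins with two empty children needs one
# move per child, so distribute_coins returns 2.
# root = TreeNode(3, TreeNode(0), TreeNode(0))
# distribute_coins(root)  # -> 2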
| 651
| 0
|
from ...configuration_utils import PretrainedConfig
lowerCamelCase : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class A( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''tapas'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
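# Hedged usage sketch (assumes the class above mirrors transformers'
# TapasConfig; the values shown are the commonly documented WTQ-style ones):
# config = A(num_aggregation_labels=4, use_answer_as_supervision=True)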
| 719
|
from manim import *
class A( Scene ):
    '''simple docstring'''
    def construct( self ) -> None:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 0
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    '''simple docstring'''
    def __init__( self , id_ ) -> None:
        """simple docstring"""
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ) -> bool:
        """simple docstring"""
        return self.key < other.key
    def __repr__( self ) -> str:
        """simple docstring"""
        return self.id
    def add_neighbor( self , vertex ) -> None:
        """simple docstring"""
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ) -> None:
        """simple docstring"""
        self.edges[vertex.id] = weight
def connect(graph: list , a: int , b: int , edge: int ) -> None:
    '''simple docstring'''
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph: list , root: Vertex ) -> list:
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph: list , root: Vertex ) -> Iterator[tuple]:
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
    import doctest
    doctest.testmod()
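# Usage sketch for the two implementations above (connect builds an
# undirected weighted edge between 1-based vertex ids):
# graph = [Vertex(n) for n in range(4)]
# connect(graph, 1, 2, 3)
# connect(graph, 2, 3, 1)
# connect(graph, 3, 4, 2)
# prim(graph, graph[0])             # -> [(2, 1), (3, 2), (4, 3)]
# list(prim_heap(graph, graph[0]))  # same MST, heap-based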
| 720
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_ ):
    '''simple docstring'''
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ) -> dict:
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d : dict ) -> str:
        """simple docstring"""
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 651
| 0
|
from itertools import product
def total_frequency_distribution(sides_number: int , dice_number: int ) -> list[int]:
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 721
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2 ) -> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
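# The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and
# 49/98; their product reduces to 1/100, so solution() returns 100.
# assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
# assert solution() == 100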
| 651
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
'''simple docstring'''
UpperCamelCase = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCamelCase = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
UpperCamelCase = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
    def __post_init__( self ) -> None:
        """simple docstring"""
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
'''simple docstring'''
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase )} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class MaskGenerator:
    '''simple docstring'''
    def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ) -> None:
        """simple docstring"""
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size' )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size' )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self ):
        """simple docstring"""
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
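# Usage sketch: with the defaults above (input 192, mask patch 32, model patch
# 4, ratio 0.6) the generator returns a flat mask over (192 / 4) ** 2 == 2304
# patch tokens, with roughly 60% ones arranged in 8x8 blocks.
# mask_generator = MaskGenerator()
# mask = mask_generator()  # torch.Size([2304])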
def collate_fn(examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    mask = torch.stack([example['mask'] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase_ = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0:
lowerCamelCase_ = ds['train'].train_test_split(data_args.train_val_split )
lowerCamelCase_ = split['train']
lowerCamelCase_ = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
lowerCamelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase , 'decoder_type' ):
lowerCamelCase_ = 'simmim'
# adapt config
lowerCamelCase_ = model_args.image_size if model_args.image_size is not None else config.image_size
lowerCamelCase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowerCamelCase_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
lowerCamelCase_ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowerCamelCase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowerCamelCase_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowerCamelCase_ = AutoModelForMaskedImageModeling.from_config(lowercase )
if training_args.do_train:
lowerCamelCase_ = ds['train'].column_names
else:
lowerCamelCase_ = ds['validation'].column_names
if data_args.image_column_name is not None:
lowerCamelCase_ = data_args.image_column_name
elif "image" in column_names:
lowerCamelCase_ = 'image'
elif "img" in column_names:
lowerCamelCase_ = 'img'
else:
lowerCamelCase_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowerCamelCase_ = Compose(
[
Lambda(lambda lowercase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowerCamelCase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowercase : Any ):
lowerCamelCase_ = [transforms(lowercase ) for image in examples[image_column_name]]
lowerCamelCase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowerCamelCase_ = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowerCamelCase_ = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase )
# Initialize our trainer
lowerCamelCase_ = Trainer(
model=lowercase , args=lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase_ = trainer.evaluate()
trainer.log_metrics('eval' , lowercase )
trainer.save_metrics('eval' , lowercase )
# Write model card and (optionally) push to hub
lowerCamelCase_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
| 700
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
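        # Note (assumption): this appears to follow the common 256/224 shortest-edge convention,
        # i.e. resize so the shortest edge is size * 256/224, then center crop back to the target size.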
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
        lowerCamelCase_ = [to_numpy_array(image ) for image in images]
        if do_resize:
            lowerCamelCase_ = [self.resize(image , A_ , A_ ) for image in images]
        if do_center_crop:
            lowerCamelCase_ = [self.center_crop(image , A_ ) for image in images]
        if do_rescale:
            lowerCamelCase_ = [self.rescale(image , A_ ) for image in images]
        if do_normalize:
            lowerCamelCase_ = [self.normalize(image , A_ , A_ ) for image in images]
        lowerCamelCase_ = [to_channel_dimension_format(image , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 651
| 0
|
import math
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( lowercase : Callable[[float], float] , lowercase : float , lowercase : float ):
'''simple docstring'''
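    # Secant method: given two estimates x_n and x_na, compute the next iterate
    #   x_nb = x_na - f(x_na) * (x_na - x_n) / (f(x_na) - f(x_n))
    # and repeat until two successive iterates agree to within 1e-5.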
lowerCamelCase_ = xa
lowerCamelCase_ = xa
while True:
        if x_n == x_na or function(x_n ) == function(x_na ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        lowerCamelCase_ = x_na - (
            function(x_na ) / ((function(x_na ) - function(x_n )) / (x_na - x_n))
        )
        if abs(x_nb - x_na ) < 10**-5:
            return x_nb
        lowerCamelCase_ = x_na
        lowerCamelCase_ = x_nb
def _SCREAMING_SNAKE_CASE ( lowercase : float ):
'''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 701
|
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
lowerCamelCase_ = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
                lowerCamelCase_ = det - self.k * (trace**2)
                # Threshold on the corner response; the 0.5 cutoff below can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 651
| 0
|
import math
def _SCREAMING_SNAKE_CASE ( lowercase : int = 1_00 ):
'''simple docstring'''
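    # Difference between the square of the sum and the sum of the squares of 1..n.
    # Closed forms exist (sum = n(n+1)/2, sum of squares = n(n+1)(2n+1)/6), but the
    # direct computation below is already O(n).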
lowerCamelCase_ = sum(i * i for i in range(1 , n + 1 ) )
lowerCamelCase_ = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 702
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
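    # Maps every possible byte (0-255) to a printable unicode character so that byte-level
    # BPE can operate on arbitrary byte sequences reversibly, avoiding control and whitespace tokens.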
lowerCamelCase_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    lowerCamelCase_ = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
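    # Returns the set of adjacent symbol pairs (bigrams) in a word; these are the merge candidates for BPE.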
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = get_pairs(A_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(A_ ):
try:
lowerCamelCase_ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(A_ )
lowerCamelCase_ = new_word
if len(A_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = word
return word
def a__ ( self : str , A_ : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = []
for token in re.findall(self.pat , A_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def a__ ( self : Tuple , A_ : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def a__ ( self : Tuple , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
lowerCamelCase_ = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to space-prefix the text, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
lowerCamelCase_ = ' '.join(A_ )
lowerCamelCase_ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 651
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self : str ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=A_ , unet=self.dummy_unet , mel=A_ , scheduler=A_ )
lowerCamelCase_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=A_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=A_ , steps=4 , return_dict=A_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A_ , scheduler=A_ )
lowerCamelCase_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=A_ , generator=A_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=A_ , mel=A_ , scheduler=A_ )
lowerCamelCase_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=A_ , encoding=A_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=A_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 703
|
lowerCamelCase : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
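    # Dijkstra's two-stack algorithm: operands and operators are pushed onto separate stacks;
    # each ')' pops one operator and two operands and pushes the result back. '(' and spaces
    # fall through every branch and are ignored; `isdigit()` on single characters limits
    # operands to single digits.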
lowerCamelCase_ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
            operand_stack.push(int(i ) )
elif i in operators:
# RULE 2
            operator_stack.push(i )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
            lowerCamelCase_ = operand_stack.peek()
            operand_stack.pop()
            lowerCamelCase_ = operand_stack.peek()
            operand_stack.pop()
            lowerCamelCase_ = operators[opr](num2 , num1 )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 651
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
        lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
| 704
|
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
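    # Greedy activity selection: assumes activities are sorted by finish time, then picks each
    # activity whose start is no earlier than the finish of the previously selected one.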
lowerCamelCase_ = len(lowercase )
print('The following activities are selected:' )
# The first activity is always selected
lowerCamelCase_ = 0
print(lowercase , end=',' )
# Consider rest of the activities
for j in range(lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase , end=',' )
lowerCamelCase_ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = [1, 3, 0, 5, 8, 5]
lowerCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 651
| 0
|
from math import ceil, sqrt
def _SCREAMING_SNAKE_CASE ( lowercase : int = 1_00_00_00 ):
'''simple docstring'''
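    # A square lamina with outer width o and hole width h uses o^2 - h^2 tiles, with h and o
    # of equal parity. For each o, count the hole widths h >= sqrt(o^2 - limit) that keep the
    # tile count within the limit.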
lowerCamelCase_ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase_ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase_ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
| 705
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : str=13 , A_ : List[Any]=32 , A_ : Tuple=2 , A_ : Dict=3 , A_ : Union[str, Any]=16 , A_ : List[str]=[32, 64, 128] , A_ : Optional[Any]=[1, 2, 1] , A_ : Tuple=[2, 2, 4] , A_ : Dict=2 , A_ : Optional[Any]=2.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : List[str]=0.0 , A_ : Optional[int]=0.1 , A_ : str="gelu" , A_ : Optional[Any]=False , A_ : Any=True , A_ : Optional[Any]=0.02 , A_ : Dict=1E-5 , A_ : int=True , A_ : Optional[int]=None , A_ : List[str]=True , A_ : Tuple=10 , A_ : Any=8 , A_ : Dict=["stage1", "stage2"] , A_ : Optional[Any]=[1, 2] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : int , A_ : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = FocalNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self : Tuple , A_ : List[str] , A_ : Optional[int] , A_ : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self : int , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Tuple , A_ : List[Any] , A_ : int , A_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(A_ )
model.to(A_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , embed_dim=37 , has_text_modality=A_ )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def a__ ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : int , A_ : List[Any] , A_ : int , A_ : Dict , A_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(A_ ) , A_ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(A_ , A_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@slow
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=A_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(A_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCamelCase_ = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase = FocalNetConfig
UpperCamelCase = False
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = FocalNetModelTester(self )
| 651
| 0
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class A:
'''simple docstring'''
UpperCamelCase = None
@experimental
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int , lowercase : Union[str, Any] , lowercase : int , lowercase : List[Any] , lowercase : Optional[int] , lowercase : str ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
return _map_with_joblib(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : str , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[str] , lowercase : List[Any] ):
'''simple docstring'''
lowerCamelCase_ = num_proc if num_proc <= len(lowercase ) else len(lowercase )
    lowerCamelCase_ = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowercase ):
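        # Contiguous split: each worker gets len(iterable) // num_proc items, and the first
        # len(iterable) % num_proc workers receive one extra item.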
lowerCamelCase_ = len(lowercase ) // num_proc
lowerCamelCase_ = len(lowercase ) % num_proc
lowerCamelCase_ = div * index + min(lowercase , lowercase )
lowerCamelCase_ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(lowercase )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
lowerCamelCase_ , lowerCamelCase_ = None, None
if not disable_tqdm:
lowerCamelCase_ , lowerCamelCase_ = (RLock(),), tqdm.set_lock
with Pool(lowercase , initargs=lowercase , initializer=lowercase ) as pool:
lowerCamelCase_ = pool.map(lowercase , lowercase )
logger.info(f"""Finished {num_proc} processes""" )
lowerCamelCase_ = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(lowercase )} objects""" )
return mapped
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Optional[Any] , lowercase : int , lowercase : List[str] , lowercase : Any , lowercase : str , lowercase : List[str] ):
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowercase ):
return joblib.Parallel()(
joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase_ = None
| 706
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
] , )
lowerCamelCase_ = text_generator.model.config.eos_token_id
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
[
{'generated_token_ids': ANY(A_ )},
{'generated_token_ids': ANY(A_ )},
],
] , )
@require_tf
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ = text_generator('This is a test' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
lowerCamelCase_ = text_generator(['This is a test', 'This is a second test'] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def a__ ( self : Optional[int] , A_ : Dict , A_ : int , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'Hello I believe in'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase_ = text_generator(A_ )
self.assertEqual(
A_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
lowerCamelCase_ = text_generator(A_ , stop_sequence=' fe' )
self.assertEqual(A_ , [{'generated_text': 'Hello I believe in fe'}] )
def a__ ( self : Any , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = text_generator.model
lowerCamelCase_ = text_generator.tokenizer
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = pipeline(task='text-generation' , model=A_ , tokenizer=A_ , return_full_text=A_ )
lowerCamelCase_ = text_generator('This is a test' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
lowerCamelCase_ = text_generator('This is a test' , return_full_text=A_ )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
lowerCamelCase_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
[{'generated_text': ANY(A_ )}, {'generated_text': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
lowerCamelCase_ = text_generator('test' , return_text=A_ , return_tensors=A_ )
# Empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends EOS and so
# works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ = text_generator('' )
self.assertEqual(A_ , [{'generated_text': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
lowerCamelCase_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCamelCase_ = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCamelCase_ = pipe('This is a test' )
self.assertEqual(
A_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def a__ ( self : int ) -> str:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
import torch
lowerCamelCase_ = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=A_ , top_p=0.5 )
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'Hello world'
lowerCamelCase_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
lowerCamelCase_ = logging.get_logger('transformers.generation.tf_utils' )
else:
lowerCamelCase_ = logging.get_logger('transformers.generation.utils' )
lowerCamelCase_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
lowerCamelCase_ = text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
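# Minimal usage sketch of the pipeline behavior exercised above (illustrative:
# requires downloading the tiny test checkpoint; the generated text itself is
# model-dependent, so only the output shape is checked):
_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
_output = _generator("Hello I believe in", max_new_tokens=5, do_sample=False)
assert isinstance(_output, list) and "generated_text" in _output[0]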
| 651
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCamelCase : int = logging.get_logger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , *A_ : Union[str, Any] , **A_ : Union[str, Any] ) -> None:
"""simple docstring"""
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , A_ , )
super().__init__(*A_ , **A_ )
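# Migration sketch for the deprecation above (illustrative; "Intel/dpt-large"
# is only an example checkpoint id and fetching it requires network access):
_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")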
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase : Tuple = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
lowerCamelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(A_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def a__ ( self : str , A_ : Optional[Any] , A_ : Optional[int] , A_ : str , A_ : Optional[Any]=None ) -> int:
"""simple docstring"""
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCamelCase_ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCamelCase_ = black.format_str(A_ , mode=A_ )
lowerCamelCase_ = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(A_ , 'w' , newline='\n' ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , 'r' ) as f:
self.assertTrue(f.read() , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , A_ ) , )
# Copy consistency with a really long name
lowerCamelCase_ = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , A_ , overwrite_result=re.sub('DDPM' , 'Test' , A_ ) , )
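# The rename mechanics the copy-consistency checks rely on, in isolation
# (illustrative): the "with DDPM->Test" suffix is applied as a plain textual
# substitution before comparing against the reference code.
assert re.sub("DDPM", "Test", "class DDPMSchedulerOutput:") == "class TestSchedulerOutput:"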
| 651
| 0
|
from math import sqrt
def _SCREAMING_SNAKE_CASE ( lowercase : int = 1_00_00_00 ):
'''simple docstring'''
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
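# Sanity sketch of the geometry used above (illustrative): for a cuboid with
# sides a <= b <= c, the shortest surface path has length sqrt((a + b)**2 + c**2),
# which is what the loop tests via `sum_shortest_sides = a + b`.
def _shortest_path_is_integer(a: int, b: int, c: int) -> bool:
    return sqrt((a + b) ** 2 + c ** 2).is_integer()

assert _shortest_path_is_integer(3, 5, 6)  # the classic 6x5x3 room: path length 10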
| 708
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] , A_ : Tuple , A_ : str , A_ : int ) -> Any:
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
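# Single-replica sketch of the accumulate-then-apply pattern the tests above
# exercise (illustrative; assumes the same GradientAccumulator / create_optimizer
# API, with throwaway variable names):
if is_tf_available():
    _variable = tf.Variable([0.0, 0.0])
    _accumulator = GradientAccumulator()
    _optimizer, _ = create_optimizer(5e-5, 10, 5)
    for _grad in ([1.0, 1.0], [3.0, -1.0]):
        _accumulator([tf.constant(_grad)])  # accumulate one micro-batch gradient
    _optimizer.apply_gradients(zip(_accumulator.gradients, [_variable]))  # apply the sum
    _accumulator.reset()  # start the next accumulation window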
| 651
| 0
|
from __future__ import annotations
from fractions import Fraction
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(lowercase , lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase , lowercase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ):
'''simple docstring'''
lowerCamelCase_ = 1.0
for fraction in fraction_list(lowercase ):
lowerCamelCase_ = Fraction(lowercase )
result *= frac.denominator / frac.numerator
return int(lowercase )
if __name__ == "__main__":
print(solution())
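# Worked check of the cancelling rule used above (self-contained, since the
# original helper names were rewritten): 49/98 is the classic case, because
# dropping the shared digit 9 leaves 4/8, which equals 49/98.
def _cancels(num: int, den: int) -> bool:
    return (
        num != den
        and num % 10 == den // 10
        and den % 10 != 0
        and Fraction(num // 10, den % 10) == Fraction(num, den)
    )

assert _cancels(49, 98) and not _cancels(30, 50)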
| 709
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : str = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = cn.convert_to_negative(lowercase )
# assert that the negative image array contains at least one truthy value
assert negative_img.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around: assert on the string representation of the returned image
assert str(cc.change_contrast(lowercase , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# assert that every entry of the generated Gaussian kernel is non-zero
assert resp.all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert that every pixel of the loaded grayscale image is non-zero
assert canny_img.all()
lowerCamelCase_ = canny.canny(lowercase )
# assert that the Canny output contains at least one edge pixel
assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase_ = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
lowerCamelCase_ = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
lowerCamelCase_ = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCamelCase_ = imread(lowercase , 0 )
# Test that get_neighbors_pixel() does not return None
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = image[x_coordinate][y_coordinate]
lowerCamelCase_ = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test the local_binary_pattern() function:
# create a numpy array with the same height and width as the read image
lowerCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase_ = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
| 651
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class A( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''bit'''
UpperCamelCase = ['''preactivation''', '''bottleneck''']
UpperCamelCase = ['''SAME''', '''VALID''']
def __init__( self : Optional[Any] , A_ : int=3 , A_ : Any=64 , A_ : Optional[int]=[256, 512, 1024, 2048] , A_ : str=[3, 4, 6, 3] , A_ : Any="preactivation" , A_ : Optional[int]="relu" , A_ : List[Any]=None , A_ : Tuple=32 , A_ : List[str]=0.0 , A_ : str=False , A_ : Optional[int]=32 , A_ : List[str]=1 , A_ : List[str]=None , A_ : str=None , **A_ : int , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**A_ )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
lowerCamelCase_ = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
lowerCamelCase_ = num_channels
lowerCamelCase_ = embedding_size
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = layer_type
lowerCamelCase_ = hidden_act
lowerCamelCase_ = global_padding
lowerCamelCase_ = num_groups
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = embedding_dynamic_padding
lowerCamelCase_ = output_stride
lowerCamelCase_ = width_factor
lowerCamelCase_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )]
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
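# The stage-naming scheme built above, in isolation (illustrative):
_depths = [3, 4, 6, 3]
_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(_depths) + 1)]
assert _stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]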
| 710
|
class A:
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = {}
def a__ ( self : Union[str, Any] , A_ : List[Any] ) -> int:
"""simple docstring"""
if vertex not in self.adjacency:
lowerCamelCase_ = {}
self.num_vertices += 1
def a__ ( self : int , A_ : int , A_ : Optional[Any] , A_ : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(A_ )
self.add_vertex(A_ )
if head == tail:
return
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for i in range(len(A_ ) ):
lowerCamelCase_ = list(edges[i] )
edges.sort(key=lambda A_ : e[2] )
for i in range(len(A_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase_ = edges[i][2] + 1
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def __str__( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase_ = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def a__ ( A_ : Optional[Any]=None , A_ : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Graph()
if vertices is None:
lowerCamelCase_ = []
if edges is None:
lowerCamelCase_ = []
for vertex in vertices:
g.add_vertex(A_ )
for edge in edges:
g.add_edge(*A_ )
return g
class A:
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = {}
lowerCamelCase_ = {}
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.parent )
def a__ ( self : List[str] , A_ : Any ) -> Dict:
"""simple docstring"""
if item in self.parent:
return self.find(A_ )
lowerCamelCase_ = item
lowerCamelCase_ = 0
return item
def a__ ( self : List[str] , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(A_ )
if item != self.parent[item]:
lowerCamelCase_ = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self : Any , A_ : int , A_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.find(A_ )
lowerCamelCase_ = self.find(A_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase_ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase_ = roota
return roota
return None
@staticmethod
def a__ ( A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = graph.num_vertices
lowerCamelCase_ = Graph.UnionFind()
lowerCamelCase_ = []
while num_components > 1:
lowerCamelCase_ = {}
for vertex in graph.get_vertices():
lowerCamelCase_ = -1
lowerCamelCase_ = graph.get_edges()
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edge
lowerCamelCase_ = union_find.find(A_ )
lowerCamelCase_ = union_find.find(A_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase_ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = cheap_edge[vertex]
if union_find.find(A_ ) != union_find.find(A_ ):
union_find.union(A_ , A_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase_ = num_components - 1
lowerCamelCase_ = Graph.build(edges=A_ )
return mst
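# Self-contained miniature of the union-by-rank / path-compression structure
# the UnionFind helper above implements (names here are illustrative):
class _DisjointSet:
    def __init__(self):
        self.parent, self.rank = {}, {}

    def find(self, x):
        self.parent.setdefault(x, x)
        self.rank.setdefault(x, 0)
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])  # path compression
        return self.parent[x]

    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return
        if self.rank[ra] < self.rank[rb]:
            ra, rb = rb, ra
        self.parent[rb] = ra  # attach the shallower tree under the deeper one
        if self.rank[ra] == self.rank[rb]:
            self.rank[ra] += 1

_ds = _DisjointSet()
_ds.union(1, 2)
_ds.union(2, 3)
assert _ds.find(1) == _ds.find(3)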
| 651
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Tuple = "▁"
lowerCamelCase : int = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
lowerCamelCase : Dict = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
lowerCamelCase : str = {
"facebook/s2t-small-librispeech-asr": 1_024,
}
lowerCamelCase : Optional[Any] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
lowerCamelCase : Tuple = {"mustc": MUSTC_LANGS}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = MAX_MODEL_INPUT_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = []
def __init__( self : Tuple , A_ : Optional[int] , A_ : List[Any] , A_ : Tuple="<s>" , A_ : Any="</s>" , A_ : str="<pad>" , A_ : Tuple="<unk>" , A_ : List[Any]=False , A_ : Union[str, Any]=False , A_ : Union[str, Any]=None , A_ : Tuple=None , A_ : Optional[Dict[str, Any]] = None , **A_ : int , ) -> None:
"""simple docstring"""
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , do_upper_case=A_ , do_lower_case=A_ , tgt_lang=A_ , lang_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
lowerCamelCase_ = do_upper_case
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = load_json(A_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = spm_file
lowerCamelCase_ = load_spm(A_ , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase_ = lang_codes
lowerCamelCase_ = LANGUAGES[lang_codes]
lowerCamelCase_ = [f"""<lang:{lang}>""" for lang in self.langs]
lowerCamelCase_ = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
lowerCamelCase_ = self.lang_tokens
lowerCamelCase_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase_ = {}
@property
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def a__ ( self : Dict , A_ : List[str] ) -> None:
"""simple docstring"""
lowerCamelCase_ = new_tgt_lang
self.set_tgt_lang_special_tokens(A_ )
def a__ ( self : Any , A_ : str ) -> None:
"""simple docstring"""
lowerCamelCase_ = self.lang_code_to_id[tgt_lang]
lowerCamelCase_ = [lang_code_id]
def a__ ( self : Any , A_ : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def a__ ( self : Optional[int] , A_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def a__ ( self : Optional[int] , A_ : int ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def a__ ( self : List[str] , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = []
lowerCamelCase_ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase_ = self.sp_model.decode(A_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase_ = []
else:
current_sub_tokens.append(A_ )
lowerCamelCase_ = self.sp_model.decode(A_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def a__ ( self : List[str] , A_ : List[str] , A_ : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def a__ ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
lowerCamelCase_ = [1] * len(self.prefix_tokens )
lowerCamelCase_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def a__ ( self : int ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self : Union[str, Any] , A_ : Dict ) -> None:
"""simple docstring"""
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase_ = {}
lowerCamelCase_ = load_spm(self.spm_file , self.sp_model_kwargs )
def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCamelCase_ = Path(A_ )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
lowerCamelCase_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
lowerCamelCase_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Dict[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = sentencepiece.SentencePieceProcessor(**lowercase )
spm.Load(str(lowercase ) )
return spm
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
with open(lowercase , 'r' ) as f:
return json.load(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : str ):
'''simple docstring'''
with open(lowercase , 'w' ) as f:
json.dump(lowercase , lowercase , indent=2 )
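# Quick self-contained round-trip of the vocab JSON helpers above (restated
# locally because the original function names were rewritten; illustrative):
import tempfile
with tempfile.TemporaryDirectory() as _tmp_dir:
    _vocab_path = Path(_tmp_dir) / "vocab.json"
    _vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    with open(_vocab_path, "w") as _f:
        json.dump(_vocab, _f, indent=2)
    with open(_vocab_path, "r") as _f:
        assert json.load(_f) == _vocab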
| 711
|
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 0
for i in range(1 , 10_01 ):
total += i**i
return str(lowercase )[-10:]
if __name__ == "__main__":
print(solution())
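# Equivalent lighter-weight variant (a sketch, not the original approach): keep
# only the last ten digits during accumulation via three-argument pow.
def _last_ten_digits(upper: int = 1000) -> str:
    mod = 10 ** 10
    return str(sum(pow(i, i, mod) for i in range(1, upper + 1)) % mod).zfill(10)

assert len(_last_ten_digits()) == 10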
| 651
| 0
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = LxmertTokenizer
UpperCamelCase = LxmertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Any , A_ : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'I was born in 92000, and this is falsé.'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
| 712
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["ViTFeatureExtractor"]
lowerCamelCase : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
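# The optional-dependency gating above, in miniature (self-contained sketch):
# an export list is populated only when the backing library can be imported.
import importlib.util
_TF_EXPORTS = ["TFViTModel"] if importlib.util.find_spec("tensorflow") else []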
| 651
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase : int = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
lowerCamelCase_ = v.to_dict()
return d
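# The nested-config serialization pattern used by to_dict() above, in isolation
# (illustrative, duck-typed variant): any value exposing its own to_dict() is
# flattened recursively into a plain dict.
def _serialize(d: dict) -> dict:
    return {k: (v.to_dict() if hasattr(v, "to_dict") else v) for k, v in d.items()}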
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase : int = datasets.logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase : Optional[Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] , lowercase : Any=False , lowercase : Any=False , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int="dummy_doc" ):
'''simple docstring'''
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = reader.get_mention_assignments(lowercase , lowercase )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 1_00
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if parse_col != "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def a__ ( self : List[str] , A_ : Optional[Any] , A_ : Optional[int] , A_ : int=True , A_ : str=False , A_ : int=False , A_ : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(A_ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=A_ , sys_lines=A_ , metrics=A_ , NP_only=A_ , remove_nested=A_ , keep_singletons=A_ , min_span=A_ , )
return score
| 651
| 0
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''new-model'''
if is_tf_available():
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = NewModelConfig
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
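# The tests above all exercise one auto-API pattern; a minimal sketch of it
# (illustration only, not part of the test suite; running it downloads weights):
#
#     from transformers import AutoConfig, TFAutoModel
#
#     config = AutoConfig.from_pretrained('bert-base-cased')   # resolves to a BertConfig
#     model = TFAutoModel.from_pretrained('bert-base-cased')   # resolves to a TFBertModel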
| 714
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    '''simple docstring'''
    task: str = field(default='''language-modeling''', metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
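# Usage sketch for the task template above (the attribute names follow the
# reconstruction; the dataset column name 'content' is hypothetical):
#
#     template = LanguageModeling(text_column='content')
#     template.column_mapping   # -> {'content': 'text'}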
| 651
| 0
|
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    '''simple docstring'''
    octets = [int(i) for i in ip_va_address.split('.') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
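# Expected behaviour of the validator above (it keeps the source's 0-254 octet
# range, so 255 is treated as invalid):
#
#     is_ip_va_address_valid('192.168.0.23')   # -> True
#     is_ip_va_address_valid('192.255.15.8')   # -> False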
| 715
|
| 651
| 0
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : str , A_ : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = 'This is a test'
lowerCamelCase_ = 'This is a test'
return input_text, output_text
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = '<s>'
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(A_ ) , 2000 )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [465, 287, 265, 631, 842] )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(A_ )
# fmt: off
self.assertListEqual(
A_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
lowerCamelCase_ = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCamelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(A_ , A_ ):
self.assertListEqual(tokenizer.encode_fast(A_ ) , A_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(A_ , A_ ):
self.assertEqual(tokenizer.decode_fast(A_ ) , A_ )
@slow
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCamelCase_ = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=A_ , )
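# A minimal sketch of the byte-fallback behaviour the tests above pin down
# (the token ids come from the expected values in the tests; SAMPLE_VOCAB is
# the SentencePiece fixture defined at the top of this file):
#
#     tok = GPTSwaTokenizer(SAMPLE_VOCAB)
#     tok.encode_fast('This is a test')   # -> [465, 287, 265, 631, 842]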
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''gpt_neox_japanese'''
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
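# Minimal sketch of instantiating the config above with selective overrides
# (assumes the public `transformers` package exposes the same class this
# module defines):
#
#     from transformers import GPTNeoXJapaneseConfig
#
#     config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
#     assert config.model_type == 'gpt_neox_japanese'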
| 651
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"""{max_subarray_sum(nums) = }""")
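# For the example list above, Kadane's algorithm reports max_subarray_sum(nums) = 6,
# achieved by the subarray [4, -1, 2, 1].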
| 717
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    '''simple docstring'''
    path = os.path.join(output_dir, f"""{split}_results.json""")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    '''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_glue.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_clm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 100 )
@slow
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(A_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 10 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertLess(result['eval_perplexity'] , 42 )
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_flax_ner.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A_ , 'argv' , A_ ):
run_qa.main()
lowerCamelCase_ = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'] , 30 )
self.assertGreaterEqual(result['eval_exact'] , 30 )
| 651
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
lowerCamelCase : List[Any] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens = []
    suffix_tokens = []
def __init__( self : Optional[int] , A_ : str=None , A_ : Any=None , A_ : List[str]="<s>" , A_ : List[Any]="</s>" , A_ : List[str]="</s>" , A_ : Optional[int]="<s>" , A_ : Dict="<unk>" , A_ : Dict="<pad>" , A_ : Any="<mask>" , A_ : List[str]=None , A_ : List[Any]=None , A_ : List[Any]=None , A_ : str=False , **A_ : int , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
lowerCamelCase_ = legacy_behaviour
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , legacy_behaviour=A_ , **A_ , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__ ( self : Dict , A_ : str ) -> None:
"""simple docstring"""
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ ( self : Any , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__ ( self : Optional[int] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : str , A_ : int , A_ : str , A_ : Optional[str] , A_ : Optional[str] , **A_ : int ) -> int:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
lowerCamelCase_ = self.convert_tokens_to_ids(A_ )
lowerCamelCase_ = tgt_lang_id
return inputs
def a__ ( self : Tuple , A_ : List[str] , A_ : str = "eng_Latn" , A_ : Optional[List[str]] = None , A_ : str = "fra_Latn" , **A_ : Tuple , ) -> BatchEncoding:
"""simple docstring"""
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__ ( self : str ) -> str:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ ( self : Optional[int] , A_ : Dict ) -> None:
"""simple docstring"""
lowerCamelCase_ = self.convert_tokens_to_ids(A_ )
if self.legacy_behaviour:
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase_ = [self.cur_lang_code]
lowerCamelCase_ = [self.eos_token_id]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a__ ( self : str , A_ : str ) -> None:
"""simple docstring"""
lowerCamelCase_ = self.convert_tokens_to_ids(A_ )
if self.legacy_behaviour:
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase_ = [self.cur_lang_code]
lowerCamelCase_ = [self.eos_token_id]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a__ ( self : List[str] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
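# Minimal usage sketch for the language-code plumbing above (running this
# downloads tokenizer files from the Hub):
#
#     tok = NllbTokenizerFast.from_pretrained(
#         'facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn'
#     )
#     inputs = tok('Hello world')   # encoded with the eng_Latn source special tokens
#     tok.src_lang = 'fra_Latn'     # the src_lang setter re-applies the special tokens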
| 718
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)
    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
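# Worked example for the reconstruction above: a root holding 3 coins with two
# empty children needs one move per child, so 2 moves in total:
#
#     distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))   # -> 2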
| 651
| 0
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    '''simple docstring'''
    def __init__(self, *, # begin keyword-only arguments
                 bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None):
        """simple docstring"""
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        """simple docstring"""
        return self.indices == other.indices
    def __getitem__(self, idx):
        """simple docstring"""
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        """simple docstring"""
        return len(self.symbols)
    def __contains__(self, sym):
        """simple docstring"""
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """simple docstring"""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        """simple docstring"""
        return 0
    def add_from_file(self, f):
        """simple docstring"""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def rewrite_dict_keys(d):
    '''simple docstring'''
    da = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"""Writing results to {pytorch_dump_folder_path}""")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location='cpu')
    args = chkpt['cfg']['model']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(f"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt['model']
    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : str = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
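# Example invocation (both paths are placeholders, and the script filename is
# the assumed upstream name; the checkpoint dir must contain checkpoint.pt,
# dict.txt and bpecodes, as checked above):
#
#     python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/fairseq_biogpt \
#         --pytorch_dump_folder_path /path/to/hf_biogpt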
| 719
|
from manim import *
class Stage5(Scene):
    '''simple docstring'''
    def construct(self) -> None:
        """simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
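# Rendering sketch: manim discovers scenes by class name and calls their
# `construct` method, so the scene above can be rendered with the community
# manim CLI (the module filename is a placeholder):
#
#     manim -pql this_module.py Stage5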
| 651
| 0
|