code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowercase__ ( unittest.TestCase ):
    """Tests for activation functions exposed by ``transformers.activations``.

    NOTE(review): this chunk is machine-obfuscated. Names such as
    ``__SCREAMING_SNAKE_CASE``, ``torch_builtin``, ``geluaa``, ``y_gelu``,
    ``clipped_mask`` and ``acta`` are unresolved placeholders (locals were all
    rebound to ``UpperCamelCase``), so these methods raise NameError as
    written. Comments below describe the apparent intent only.
    """

    def _a ( self ):
        # Intent: gelu_python should match torch's builtin gelu but differ
        # from gelu_new on the same sample points.
        UpperCamelCase : Union[str, Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        UpperCamelCase : Tuple = get_activation("""gelu""" )
        self.assertTrue(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , torch_builtin(__SCREAMING_SNAKE_CASE ) ) )
        self.assertFalse(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , gelu_new(__SCREAMING_SNAKE_CASE ) ) )

    def _a ( self ):
        # Intent: gelu_10 should equal plain gelu below the clip value and
        # saturate at 10.0 above it.
        UpperCamelCase : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        UpperCamelCase : Optional[int] = get_activation("""gelu""" )
        UpperCamelCase : List[str] = get_activation("""gelu_10""" )
        UpperCamelCase : Union[str, Any] = torch_builtin(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Optional[Any] = geluaa(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(__SCREAMING_SNAKE_CASE ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )

    def _a ( self ):
        # Every registered activation name must resolve; unknown names and a
        # non-string argument must raise (exception type is the unresolved
        # placeholder ``__SCREAMING_SNAKE_CASE``; upstream this is KeyError).
        get_activation("""gelu""" )
        get_activation("""gelu_10""" )
        get_activation("""gelu_fast""" )
        get_activation("""gelu_new""" )
        get_activation("""gelu_python""" )
        get_activation("""gelu_pytorch_tanh""" )
        get_activation("""linear""" )
        get_activation("""mish""" )
        get_activation("""quick_gelu""" )
        get_activation("""relu""" )
        get_activation("""sigmoid""" )
        get_activation("""silu""" )
        get_activation("""swish""" )
        get_activation("""tanh""" )
        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            get_activation("""bogus""" )
        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            get_activation(__SCREAMING_SNAKE_CASE )

    def _a ( self ):
        # Intent: setting an attribute on one activation instance must not
        # leak onto a freshly constructed instance.
        UpperCamelCase : int = get_activation("""gelu""" )
        UpperCamelCase : str = 1
        UpperCamelCase : str = get_activation("""gelu""" )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(__SCREAMING_SNAKE_CASE ):
            UpperCamelCase : Tuple = acta.a
| 102 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( _a):
    """Abstract base class for a CLI sub-command.

    NOTE(review): the base class ``_a`` is an unresolved obfuscated name
    (presumably ``ABC``), and both abstract methods share the placeholder
    name ``_UpperCAmelCase``, so the second shadows the first as written.
    """

    @staticmethod
    @abstractmethod
    def _UpperCAmelCase ( __SCREAMING_SNAKE_CASE : ArgumentParser ):
        # Register this command's arguments on the given parser.
        raise NotImplementedError()

    @abstractmethod
    def _UpperCAmelCase ( self : List[Any] ):
        # Execute the command.
        raise NotImplementedError()
| 333 | 0 |
'''simple docstring'''
from math import factorial, radians
def __UpperCAmelCase(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate ``sin(angle_in_degrees)`` with a Maclaurin series.

    sin(x) = x - x**3/3! + x**5/5! - ...

    Fixes over the scrambled original: all three parameters shared the
    placeholder name ``UpperCamelCase__`` (a SyntaxError) while the body read
    unbound names; the alternating sign was never flipped (``b`` stayed -1);
    and ``factorial`` was called with the wrong argument instead of ``a``.

    :param angle_in_degrees: angle to evaluate, in degrees (any magnitude).
    :param accuracy: number of series terms added after the leading ``x``.
    :param rounded_values_count: decimal places of the returned value.
    :return: the approximation rounded to ``rounded_values_count`` places.
    """
    # Reduce to [0, 360) so large angles stay numerically well-behaved.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 574 |
'''simple docstring'''
def __UpperCAmelCase(number: int) -> bool:
    """Return True if ``number`` is even.

    Fix over the scrambled original: the parameter was the placeholder
    ``UpperCamelCase__`` while the body read the unbound name ``number``.

    Uses a bitwise AND with 1: the lowest bit of an even integer is 0.
    Correct for negatives too (Python's ``&`` on ints follows
    two's-complement semantics).
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 574 | 1 |
'''simple docstring'''
from __future__ import annotations
def __snake_case(row_size: int = 4) -> list[list[int]]:
    """Build a ``row_size`` x ``row_size`` matrix filled with 1..row_size**2.

    Fix over the scrambled original: the parameter was the placeholder
    ``SCREAMING_SNAKE_CASE_`` while the body read the unbound ``row_size``.

    Negative sizes use their absolute value; 0 falls back to the default 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
# NOTE(review): the helper names called below (transpose / reverse_row /
# reverse_column) do not exist in this scrambled file — the helpers are all
# defined under the placeholder name ``__snake_case`` — so these three
# functions raise NameError as written. The compositions themselves match the
# standard matrix-rotation identities.
def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise: transpose, then reverse rows."""
    return reverse_row(transpose(SCREAMING_SNAKE_CASE_ ) )
    # OR.. transpose(reverse_column(matrix))


def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 180 degrees: reverse columns, then reverse rows."""
    return reverse_row(reverse_column(SCREAMING_SNAKE_CASE_ ) )
    # OR.. reverse_column(reverse_row(matrix))


def __snake_case ( SCREAMING_SNAKE_CASE_ : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise: transpose, then reverse columns."""
    return reverse_column(transpose(SCREAMING_SNAKE_CASE_ ) )
    # OR.. transpose(reverse_row(matrix))
def __snake_case(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of ``matrix`` as a new list of row lists.

    Fix over the scrambled original: the result was bound to the placeholder
    ``UpperCAmelCase`` and the unbound name ``matrix`` was returned.
    """
    return [list(row) for row in zip(*matrix)]
def __snake_case(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` with its rows in reverse order (new outer list).

    Fix over the scrambled original: the reversed list was bound to the
    placeholder ``UpperCAmelCase`` and the unbound name ``matrix`` returned.
    """
    return matrix[::-1]
def __snake_case(matrix: list[list[int]]) -> list[list[int]]:
    """Return ``matrix`` with each individual row reversed.

    Fix over the scrambled original: the result was bound to the placeholder
    ``UpperCAmelCase`` and the unbound name ``matrix`` was returned.
    """
    return [row[::-1] for row in matrix]
def __snake_case(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line, values space-separated.

    Fixes over the scrambled original: the loop iterated an unbound name
    ``matrix`` and printed the whole argument for every row instead of the
    current row.
    """
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    # Demo driver: show the original matrix and its 90/180/270 degree
    # counterclockwise rotations.
    # NOTE(review): ``make_matrix``, ``print_matrix``, ``rotate_aa``,
    # ``rotate_aaa`` and ``matrix`` are unresolved obfuscated names — the
    # helpers above are all defined as ``__snake_case`` and the results are
    # bound to ``a__`` — so this demo raises NameError as written. Note also
    # that ``rotate_aaa`` is used for both the 180 and 270 degree cases.
    a__ : str = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_aa(matrix))

    a__ : List[str] = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_aaa(matrix))

    a__ : Union[str, Any] = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_aaa(matrix))
| 51 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ )
@dataclass
class lowerCAmelCase__ :
    """Command-line arguments for the benchmark-plotting script.

    NOTE(review): machine-obfuscated — every attribute shares the placeholder
    name ``_lowerCamelCase`` (so only the last assignment survives) and the
    defaults reference the unresolved name ``UpperCAmelCase_`` and the
    helper ``list_field``. The ``help`` strings document the intent.
    """

    # The csv file containing benchmark results to plot (required).
    _lowerCamelCase =field(
        metadata={"help": "The csv file to plot."} , )
    # Plot along batch size instead of sequence length.
    _lowerCamelCase =field(
        default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    # CSV holds time results rather than memory results.
    _lowerCamelCase =field(
        default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    # Use linear axes instead of logarithmic ones.
    _lowerCamelCase =field(
        default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , )
    # CSV holds training results rather than inference results.
    _lowerCamelCase =field(
        default=UpperCAmelCase_ , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    # Where to save the rendered plot; if unset, the plot is shown instead.
    _lowerCamelCase =field(
        default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    # Optional short display names, one per model in the csv.
    _lowerCamelCase =list_field(
        default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
    """Tell whether the value can be parsed as an ``int`` (EAFP probe)."""
    try:
        int(SCREAMING_SNAKE_CASE_)
    except ValueError:
        return False
    return True
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str:
    """Tell whether the value can be parsed as a ``float`` (EAFP probe)."""
    try:
        float(SCREAMING_SNAKE_CASE_)
    except ValueError:
        return False
    return True
class lowerCAmelCase__ :
    """Reads a benchmark CSV and renders time/memory vs. batch-size/seq-len.

    NOTE(review): machine-obfuscated — locals are all rebound to the
    placeholder ``UpperCAmelCase``, so names read later (``reader``,
    ``model_name``, ``ax``, ``title_str``, ``results`` …) are unresolved as
    written; ``can_convert_to_int``/``can_convert_to_float`` refer to the
    two ``__snake_case`` probes above. Comments describe the apparent intent.
    """

    def __init__( self : Dict , a__ : Optional[int] ):
        # Parse the CSV into {model: {"bsz": [...], "seq_len": [...], "result": {...}}}.
        UpperCAmelCase = args
        UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='''''' ) as csv_file:
            UpperCAmelCase = csv.DictReader(a__ )
            for row in reader:
                UpperCAmelCase = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
                if can_convert_to_int(row['''result'''] ):
                    # value is not None
                    UpperCAmelCase = int(row['''result'''] )
                elif can_convert_to_float(row['''result'''] ):
                    # value is not None
                    UpperCAmelCase = float(row['''result'''] )

    def __snake_case ( self : Dict ):
        """Draw one scatter/line series per model and save or show the figure."""
        UpperCAmelCase, UpperCAmelCase = plt.subplots()
        UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
        UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''' )
            ax.set_yscale('''log''' )
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
            UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
            UpperCAmelCase = self.result_dict[model_name]['''result''']
            # Choose which dimension goes on the x axis.
            ((UpperCAmelCase), (UpperCAmelCase)) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            UpperCAmelCase = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    UpperCAmelCase = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , )
                else:
                    UpperCAmelCase = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                ((UpperCAmelCase), (UpperCAmelCase)) = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )
                # Trim the x axis to the points that actually have results.
                UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )]
                plt.scatter(
                    a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(a__ , a__ , '''--''' )
            title_str += f" {label_model_name} vs."
        UpperCAmelCase = title_str[:-4]
        UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
        # plot
        plt.title(a__ )
        plt.xlabel(a__ )
        plt.ylabel(a__ )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def __snake_case ( ) -> Tuple:
    """Parse the plotting arguments from the CLI and render the plot.

    NOTE(review): results are bound to the placeholder ``UpperCAmelCase``,
    so ``args`` in ``Plot(args=...)`` and ``plot`` are unresolved as written;
    ``Plot`` refers to the obfuscated class above.
    """
    UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
    UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
    UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ )
    plot.plot()


if __name__ == "__main__":
    main()
| 51 | 1 |
from collections.abc import Sequence
def lowerCAmelCase__(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``; ``poly[i]`` is the coefficient of ``x**i``.

    Fix over the scrambled original: both parameters were named
    ``lowerCamelCase_`` (a SyntaxError) while the body read the unbound ``x``.

    Naive O(n) evaluation computing each power independently; see the
    Horner-scheme variant below for the multiplication-saving form.
    """
    return sum(coeff * (x**power) for power, coeff in enumerate(poly))
def lowerCAmelCase__(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x`` with Horner's method (one multiply per term).

    ``poly[i]`` is the coefficient of ``x**i``.

    Fix over the scrambled original: both parameters were named
    ``lowerCamelCase_`` (a SyntaxError), the accumulator was rebound to the
    placeholder ``lowerCAmelCase__`` each iteration, and ``x`` was unbound.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Smoke test: both evaluators should print 79800.0 for this polynomial.
    # NOTE(review): ``evaluate_poly``, ``horner``, ``poly`` and ``x`` are
    # unresolved obfuscated names — both helpers above are defined as
    # ``lowerCAmelCase__`` and the constants are bound to ``__snake_case`` —
    # so this block raises NameError as written.
    __snake_case : Optional[int] =(0.0, 0.0, 5.0, 9.3, 7.0)
    __snake_case : Dict =10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 90 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase__ ( lowerCamelCase__):
    """Config tester: checks the MobileViTV2 config exposes ``width_multiplier``.

    NOTE(review): machine-obfuscated — the class inherits from its own
    placeholder name and the constructed config is bound to
    ``lowerCAmelCase__`` while ``__lowerCamelCase`` is read, so this raises
    NameError as written (upstream this subclasses ``ConfigTester``).
    """

    def lowerCAmelCase__ (self ) -> Optional[Any]:
        # Instantiate the config and probe a v2-specific attribute.
        lowerCAmelCase__ : Optional[int] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(__lowerCamelCase ,'''width_multiplier''' ) )
class lowerCamelCase__ :
    """Builds configs, dummy inputs, and shape checks for MobileViTV2 tests.

    NOTE(review): machine-obfuscated — every local/attribute target is the
    placeholder ``lowerCAmelCase__`` and every parameter is
    ``__lowerCamelCase`` (duplicated in signatures, a SyntaxError), so
    attributes like ``self.batch_size`` are never actually assigned as
    written. Comments describe the apparent intent.
    """

    def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=64 ,__lowerCamelCase=2 ,__lowerCamelCase=3 ,__lowerCamelCase="swish" ,__lowerCamelCase=3 ,__lowerCamelCase=32 ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.02 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=10 ,__lowerCamelCase=None ,__lowerCamelCase=0.25 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.0 ,) -> Union[str, Any]:
        # Store tester hyper-parameters (batch size, image size, channels, ...).
        lowerCAmelCase__ : Dict = parent
        lowerCAmelCase__ : List[str] = batch_size
        lowerCAmelCase__ : Dict = image_size
        lowerCAmelCase__ : Tuple = patch_size
        lowerCAmelCase__ : List[Any] = num_channels
        lowerCAmelCase__ : Tuple = make_divisible(5_12 * width_multiplier ,divisor=8 )
        lowerCAmelCase__ : Dict = hidden_act
        lowerCAmelCase__ : Optional[int] = conv_kernel_size
        lowerCAmelCase__ : int = output_stride
        lowerCAmelCase__ : List[Any] = classifier_dropout_prob
        lowerCAmelCase__ : Dict = use_labels
        lowerCAmelCase__ : Tuple = is_training
        lowerCAmelCase__ : str = num_labels
        lowerCAmelCase__ : Optional[Any] = initializer_range
        lowerCAmelCase__ : Tuple = scope
        lowerCAmelCase__ : Tuple = width_multiplier
        lowerCAmelCase__ : Optional[int] = ffn_dropout
        lowerCAmelCase__ : Dict = attn_dropout

    def lowerCAmelCase__ (self ) -> List[Any]:
        # Random pixel values plus optional classification/segmentation labels.
        lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : Union[str, Any] = None
        lowerCAmelCase__ : Dict = None
        if self.use_labels:
            lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_labels )
            lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        lowerCAmelCase__ : Tuple = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCAmelCase__ (self ) -> Optional[Any]:
        # Build a MobileViTV2 config from the stored hyper-parameters.
        return MobileViTVaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
        # Forward the base model; check the feature-map output shape.
        lowerCAmelCase__ : int = MobileViTVaModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        lowerCAmelCase__ : Dict = model(__lowerCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Tuple:
        # Forward the image-classification head; check logits shape.
        lowerCAmelCase__ : List[str] = self.num_labels
        lowerCAmelCase__ : Union[str, Any] = MobileViTVaForImageClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        lowerCAmelCase__ : Dict = model(__lowerCamelCase ,labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]:
        # Forward the semantic-segmentation head, with and without labels.
        lowerCAmelCase__ : Any = self.num_labels
        lowerCAmelCase__ : Optional[int] = MobileViTVaForSemanticSegmentation(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
        lowerCAmelCase__ : List[str] = model(__lowerCamelCase ,labels=__lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def lowerCAmelCase__ (self ) -> Any:
        # Repackage the prepared inputs into the dict form the common tests use.
        lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
        lowerCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
    """Common model-test suite for MobileViTV2 (model/classification/segmentation).

    NOTE(review): machine-obfuscated — the bases are the class's own
    placeholder name (upstream: ``ModelTesterMixin, PipelineTesterMixin``),
    the class attributes all collapse onto ``snake_case_``, and locals/args
    use ``lowerCAmelCase__``/``__lowerCamelCase`` placeholders, so most
    methods raise NameError as written.
    """

    # Model classes / pipeline mapping under test, plus disabled common tests.
    snake_case_ =(
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    snake_case_ =(
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    snake_case_ =False
    snake_case_ =False
    snake_case_ =False
    snake_case_ =False

    def lowerCAmelCase__ (self ) -> Optional[int]:
        # Wire up the model tester and config tester.
        lowerCAmelCase__ : Dict = MobileViTVaModelTester(self )
        lowerCAmelCase__ : Optional[int] = MobileViTVaConfigTester(self ,config_class=__lowerCamelCase ,has_text_modality=__lowerCamelCase )

    def lowerCAmelCase__ (self ) -> List[Any]:
        # Delegate to the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def lowerCAmelCase__ (self ) -> Union[str, Any]:
        pass

    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def lowerCAmelCase__ (self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def lowerCAmelCase__ (self ) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def lowerCAmelCase__ (self ) -> Union[str, Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowerCAmelCase__ (self ) -> Union[str, Any]:
        pass

    def lowerCAmelCase__ (self ) -> Dict:
        # Forward signature must start with ``pixel_values``.
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Union[str, Any] = model_class(__lowerCamelCase )
            lowerCAmelCase__ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : str = [*signature.parameters.keys()]
            lowerCAmelCase__ : Union[str, Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,__lowerCamelCase )

    def lowerCAmelCase__ (self ) -> Optional[Any]:
        lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def lowerCAmelCase__ (self ) -> Dict:
        # Hidden states: expect 5 feature maps, each halving height/width.
        def check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
            lowerCAmelCase__ : Union[str, Any] = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) )
            lowerCAmelCase__ : Dict = outputs.hidden_states
            lowerCAmelCase__ : Any = 5
            self.assertEqual(len(__lowerCamelCase ) ,__lowerCamelCase )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCAmelCase__ : Optional[Any] = 2
            for i in range(len(__lowerCamelCase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride ,divisor // 2 )

        lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = True
            check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Tuple = True
            check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )

    def lowerCAmelCase__ (self ) -> Tuple:
        lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    def lowerCAmelCase__ (self ) -> Tuple:
        lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )

    @slow
    def lowerCAmelCase__ (self ) -> Dict:
        # Loading the first published checkpoint should succeed.
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : Optional[int] = MobileViTVaModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
def lowerCAmelCase__ ( ):
    """Load the COCO cats fixture image used by the integration tests.

    NOTE(review): the opened image is bound to the placeholder
    ``lowerCAmelCase__`` while the unbound name ``image`` is returned —
    NameError as written.
    """
    lowerCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration tests against published MobileViTV2 checkpoints.

    NOTE(review): machine-obfuscated — results are bound to the placeholder
    ``lowerCAmelCase__`` while later lines read names like ``model``,
    ``image_processor``, ``outputs`` and ``segmentation``, so these tests
    raise NameError as written. Expected values document upstream behavior.
    """

    @cached_property
    def lowerCAmelCase__ (self ) -> Optional[Any]:
        # Shared image processor for the classification test.
        return (
            MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__ (self ) -> Tuple:
        # ImageNet classification: verify logits shape and a 3-value slice.
        lowerCAmelCase__ : str = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
            __lowerCamelCase )
        lowerCAmelCase__ : str = self.default_image_processor
        lowerCAmelCase__ : Union[str, Any] = prepare_img()
        lowerCAmelCase__ : int = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**__lowerCamelCase )
        # verify the logits
        lowerCAmelCase__ : str = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,__lowerCamelCase )
        lowerCAmelCase__ : Dict = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCamelCase ,atol=1e-4 ) )

    @slow
    def lowerCAmelCase__ (self ) -> Tuple:
        # VOC semantic segmentation: verify logits shape and a 3x3x3 slice.
        lowerCAmelCase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        lowerCAmelCase__ : Optional[Any] = model.to(__lowerCamelCase )
        lowerCAmelCase__ : List[Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        lowerCAmelCase__ : Optional[Any] = prepare_img()
        lowerCAmelCase__ : Union[str, Any] = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(**__lowerCamelCase )
        lowerCAmelCase__ : Tuple = outputs.logits
        # verify the logits
        lowerCAmelCase__ : Optional[int] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape ,__lowerCamelCase )
        lowerCAmelCase__ : List[Any] = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] ,device=__lowerCamelCase ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__lowerCamelCase ,atol=1e-4 ) )

    @slow
    def lowerCAmelCase__ (self ) -> Tuple:
        # Post-processing: resized and native segmentation map shapes.
        lowerCAmelCase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        lowerCAmelCase__ : Dict = model.to(__lowerCamelCase )
        lowerCAmelCase__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        lowerCAmelCase__ : int = prepare_img()
        lowerCAmelCase__ : Any = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(**__lowerCamelCase )
        lowerCAmelCase__ : Optional[Any] = outputs.logits.detach().cpu()
        lowerCAmelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase ,target_sizes=[(50, 60)] )
        lowerCAmelCase__ : Tuple = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape ,__lowerCamelCase )
        lowerCAmelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase )
        lowerCAmelCase__ : Optional[Any] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape ,__lowerCamelCase )
| 90 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _a(number: int) -> bool:
    """Return True if ``number`` is a perfect square.

    Fix over the scrambled original: the parameter was the placeholder
    ``lowercase__`` while the body read the unbound name ``number``, and the
    root was bound to ``__snake_case`` while ``sq`` was read.

    NOTE(review): ``int(number**0.5)`` relies on float precision and can be
    off for very large ints; ``math.isqrt`` would be exact.
    """
    sq = int(number**0.5)
    return number == sq * sq
def _a(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Return x + y + z as a reduced fraction ``(numerator, denominator)``.

    Each operand is given as a numerator/denominator pair. Fix over the
    scrambled original: all six parameters shared the placeholder name
    ``lowercase__`` (a SyntaxError) while the body read the intended names,
    and ``gcd`` was called on placeholders instead of the computed top/bottom.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def _a (lowercase__ : int = 3_5 ) -> int:
    """Search x, y with 1/x + 1/y in the forms 1/z, 1/sqrt(z), z, sqrt(z)
    (orders n = 1, -1, 2, -2), accumulate the unique sums, and return
    numerator + denominator of the total (Project Euler style).

    NOTE(review): machine-obfuscated — locals are all rebound to
    ``__snake_case`` and loop bounds read the unbound ``order``; the helpers
    ``is_sq`` and ``add_three`` referenced below are both defined above under
    the placeholder name ``_a``, so this raises NameError as written.
    """
    __snake_case = set()
    __snake_case = 42
    __snake_case = Fraction(0 )
    __snake_case = 42
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    __snake_case = x_num * y_den + x_den * y_num
                    __snake_case = x_den * y_den
                    __snake_case = gcd(lowercase__ , lowercase__ )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __snake_case = add_three(
                            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
                        unique_s.add(lowercase__ )
                    # n=2
                    __snake_case = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    __snake_case = x_den * x_den * y_den * y_den
                    if is_sq(lowercase__ ) and is_sq(lowercase__ ):
                        __snake_case = int(sqrt(lowercase__ ) )
                        __snake_case = int(sqrt(lowercase__ ) )
                        __snake_case = gcd(lowercase__ , lowercase__ )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __snake_case = add_three(
                                lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
                            unique_s.add(lowercase__ )
                    # n=-1
                    __snake_case = x_num * y_num
                    __snake_case = x_den * y_num + x_num * y_den
                    __snake_case = gcd(lowercase__ , lowercase__ )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        __snake_case = add_three(
                            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
                        unique_s.add(lowercase__ )
                    # n=2
                    __snake_case = x_num * x_num * y_num * y_num
                    __snake_case = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(lowercase__ ) and is_sq(lowercase__ ):
                        __snake_case = int(sqrt(lowercase__ ) )
                        __snake_case = int(sqrt(lowercase__ ) )
                        __snake_case = gcd(lowercase__ , lowercase__ )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            __snake_case = add_three(
                                lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
                            unique_s.add(lowercase__ )
    for num, den in unique_s:
        total += Fraction(lowercase__ , lowercase__ )
    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 56 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
    """Configuration class holding EfficientNet architecture hyper-parameters.

    Reconstructed from the scrambled original, where every ``__init__``
    parameter was the duplicated placeholder ``__magic_name__`` (a
    SyntaxError) and every attribute assignment collapsed onto
    ``snake_case_``; parameter names and defaults are recovered from the
    right-hand sides of the body, matching upstream ``EfficientNetConfig``.

    NOTE(review): the base class ``_a`` is an unresolved obfuscated name
    (upstream: ``PretrainedConfig``); extra keyword arguments are forwarded
    to it.
    """

    # Model-type identifier (obfuscated attribute name kept for compatibility).
    lowerCamelCase_ : List[Any] = '''efficientnet'''

    def __init__(
        self,
        num_channels = 3,
        image_size = 600,
        width_coefficient = 2.0,
        depth_coefficient = 3.1,
        depth_divisor = 8,
        kernel_sizes = [3, 3, 5, 3, 5, 5, 3],  # NOTE: mutable defaults kept to mirror upstream; they are read-only by convention
        in_channels = [32, 16, 24, 40, 80, 112, 192],
        out_channels = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding = [],
        strides = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio = 0.25,
        hidden_act = "swish",
        hidden_dim = 2560,
        pooling_type = "mean",
        initializer_range = 0.02,
        batch_norm_eps = 0.001,
        batch_norm_momentum = 0.99,
        dropout_rate = 0.5,
        drop_connect_rate = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes 4 hidden layers (as in upstream).
        self.num_hidden_layers = sum(num_block_repeats) * 4
class __lowerCAmelCase ( _a ):
    """ONNX export configuration for EfficientNet.

    NOTE(review): the base class ``_a`` is an unresolved obfuscated name
    (upstream: ``OnnxConfig``), and both properties share the placeholder
    name ``lowerCamelCase`` so the second shadows the first as written
    (upstream: ``inputs`` and ``atol_for_validation``).
    """

    # Minimum torch version required for the export.
    lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )

    @property
    def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
        # Input spec: NCHW pixel values with dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def lowerCamelCase (self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-5
| 60 | 0 |
def a ( A__ : str ) -> str:
    """Reverse every word longer than four characters; keep shorter words as-is.

    Fixes over the scrambled original: the generator iterated the unbound
    name ``sentence`` instead of the parameter ``A__``, and the length test
    checked the whole sentence (``len(A__)``) instead of the current word.

    Example: 'Hey wollef sroirraw' -> 'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in A__.split()
    )
if __name__ == "__main__":
    # Run doctests, then a quick demo.
    # NOTE(review): ``reverse_long_words`` is an unresolved obfuscated name —
    # the function above is defined as ``a`` — so the demo raises NameError
    # as written.
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
| 380 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowercase_ = logging.getLogger(__name__)
lowercase_ = {'facebook/bart-base': BartForConditionalGeneration}
lowercase_ = {'facebook/bart-base': BartTokenizer}
def a ( ) -> Optional[Any]:
    """Build and parse the CLI arguments for the BART-to-ONNX export script.

    NOTE(review): machine-obfuscated — the parser and parsed namespace are
    bound to the placeholder ``_lowercase`` while ``parser``/``args`` are
    read, and ``A__`` (used for types/defaults) is unresolved, so this
    raises NameError as written.
    """
    _lowercase =argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
    parser.add_argument(
        '--validation_file' , type=A__ , default=A__ , help='A csv or a json file containing the validation data.' )
    parser.add_argument(
        '--max_length' , type=A__ , default=5 , help='The maximum total input sequence length after tokenization.' , )
    parser.add_argument(
        '--num_beams' , type=A__ , default=A__ , help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ) , )
    parser.add_argument(
        '--model_name_or_path' , type=A__ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A__ , )
    parser.add_argument(
        '--config_name' , type=A__ , default=A__ , help='Pretrained config name or path if not the same as model_name' , )
    parser.add_argument(
        '--device' , type=A__ , default='cpu' , help='Device where the model will be run' , )
    parser.add_argument('--output_file_path' , type=A__ , default=A__ , help='Where to store the final ONNX file.' )
    _lowercase =parser.parse_args()
    return args
def a(model_name, device="cpu"):
    """Load a pretrained model + tokenizer pair and move the model to *device*.

    Args:
        model_name: HF model identifier, used as key into the module-level
            name->class maps and as the ``from_pretrained`` target.
        device: torch device string the model is moved to (default ``"cpu"``).

    Returns:
        ``(model, tokenizer)``.

    BUG FIX: the original declared both parameters under the duplicate name
    ``A__`` (a SyntaxError) and assigned every result to the same throwaway
    local while returning the then-undefined ``huggingface_model``/``tokenizer``.
    NOTE(review): ``model_dict``/``tokenizer_dict`` are the module-level maps,
    which this file's garbling renamed to ``lowercase_`` — restore those names
    at module level as well; confirm the BART-base config defaults below.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        # Generation defaults that keep the scripted/exported graph simple.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def a(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Script *model* for beam search, export it to ONNX and validate the graph.

    Generates a summary with the eager model, exports the scripted
    ``BARTBeamSearchGenerator`` via ``torch.onnx.export`` (opset 14, dynamic
    batch/sequence axes), deduplicates initializers, then re-runs generation
    under ONNX Runtime and asserts the token ids match.

    Args:
        model: BART model already on its target device.
        tokenizer: matching tokenizer used to build the sample input.
        onnx_file_path: destination path for the exported graph.
        num_beams: beam count baked into the exported generate call.
        max_length: maximum generation length.

    BUG FIX: the original declared five duplicate ``A__`` parameters (a
    SyntaxError) and collapsed every assignment target into ``_lowercase``;
    names restored from the surrounding call sites — confirm against the
    upstream script.
    """
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt').to(model.device)
        # Reference output from the eager model, reused as example_outputs
        # for the export and as the expected value for the ORT check.
        summary_ids = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'],
            output_names=['output_ids'],
            dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            },
            example_outputs=summary_ids,
        )
        logger.info('Model exported to {}'.format(onnx_file_path))
        # Shrink the graph by removing duplicated initializers before loading.
        deduplicated_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('Deduplicated and optimized model written to {}'.format(deduplicated_path))
        ort_sess = onnxruntime.InferenceSession(deduplicated_path)
        ort_out = ort_sess.run(
            None,
            {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def a( ) -> int:
    """Entry point: parse args, load BART, export to ONNX and validate.

    NOTE(review): this file's identifiers were mechanically garbled. Every
    local below is assigned to the throwaway name ``_lowercase`` while later
    lines read the intended originals (``args``, ``model``, ``tokenizer``...),
    and the helpers called here (``parse_args``, ``load_model_tokenizer``,
    ``export_and_validate_model``) plus the ``main`` in the guard are all
    defined as ``a`` in this file. Restore the original names before running.
    """
    # intended: args = parse_args(); max_length = 5; num_beams = 4
    _lowercase =parse_args()
    _lowercase =5
    _lowercase =4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    # intended: device = torch.device(args.device)
    _lowercase =torch.device(args.device )
    # intended: model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    _lowercase , _lowercase =load_model_tokenizer(args.model_name_or_path , A__ )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
    model.to(A__ )
    # CLI overrides for the generation defaults set above.
    if args.max_length:
        _lowercase =args.max_length
    if args.num_beams:
        _lowercase =args.num_beams
    if args.output_file_path:
        _lowercase =args.output_file_path
    else:
        _lowercase ='BART.onnx'
    logger.info('Exporting model to ONNX' )
    # intended: export_and_validate_model(model, tokenizer, output_path, num_beams, max_length)
    export_and_validate_model(A__ , A__ , A__ , A__ , A__ )


if __name__ == "__main__":
    main()
| 380 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
# Emit every DEBUG-and-above record; the bare getLogger() call below returns
# the root logger. NOTE(review): the garbled target name ``UpperCAmelCase_``
# was presumably ``logger`` (referenced further down) — TODO confirm.
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ = logging.getLogger()
def A__(SCREAMING_SNAKE_CASE_: str) -> dict:
    """Return the metrics stored in ``<output_dir>/all_results.json``.

    Args:
        SCREAMING_SNAKE_CASE_: Trainer output directory (garbled original
            parameter name kept for interface compatibility).

    Returns:
        The decoded JSON dict of metrics.

    Raises:
        ValueError: if the results file does not exist.
    """
    results_file = os.path.join(SCREAMING_SNAKE_CASE_, '''all_results.json''')
    if not os.path.exists(results_file):
        # BUG FIX: the original f-string interpolated the undefined name
        # ``path``, so the intended ValueError surfaced as a NameError.
        raise ValueError(F'''can\'t find {results_file}''')
    with open(results_file, '''r''') as f:
        return json.load(f)
# Mirror log output to stdout so the test runner captures it.
# NOTE(review): ``logger`` and ``stream_handler`` are not defined under those
# names in this file (both were garbled to ``UpperCAmelCase_`` above), so the
# addHandler line raises NameError as written — restore the original names.
UpperCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __UpperCamelCase(TestCasePlus):
    """TPU smoke tests that launch example scripts through ``xla_spawn``.

    BUG FIX(review): removed dataset residue ("| 32 |") fused onto the last
    source line, replaced the undefined base class ``A__`` with the imported
    ``TestCasePlus`` (whose ``get_auto_remove_tmp_dir`` is used below — confirm
    against the original file), and restored the local assignment targets and
    ``patch`` arguments that identifier garbling collapsed into
    ``_UpperCAmelCase``/``_UpperCamelCase``.
    NOTE(review): both methods still carry the garbled name ``UpperCamelCase``,
    so the second definition shadows the first; the originals were presumably
    ``test_``-prefixed — restore before relying on unittest discovery.
    """

    def UpperCamelCase(self):
        """run_glue.py via xla_spawn must reach 0.75 eval accuracy in <500 s."""
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()

            # A__ is this file's (garbled) get_results() helper defined above.
            result = A__(tmp_dir)
            self.assertGreaterEqual(result['''eval_accuracy'''], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def UpperCamelCase(self):
        """The Trainer TPU harness must run to completion without raising."""
        import xla_spawn

        testargs = '''
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            '''.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowercase ( A , A , unittest.TestCase ):
    # NOTE(review): identifier garbling — the two ``A`` bases duplicate each
    # other (duplicate bases raise TypeError at class-creation time; they were
    # presumably TFModelTesterMixin and PipelineTesterMixin), and the four
    # class attributes below all share the name ``__magic_name__``, so each
    # assignment overwrites the previous one. Likely originals:
    # all_model_classes / pipeline_model_mapping / two test flags — confirm.

    # Every MobileBERT model class exercised by the common tests
    # (empty tuple when TensorFlow is unavailable).
    __magic_name__ : List[str] = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    __magic_name__ : Optional[Any] = (
        {
            '''feature-extraction''': TFMobileBertModel,
            '''fill-mask''': TFMobileBertForMaskedLM,
            '''question-answering''': TFMobileBertForQuestionAnswering,
            '''text-classification''': TFMobileBertForSequenceClassification,
            '''token-classification''': TFMobileBertForTokenClassification,
            '''zero-shot''': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Common-test feature flags (both disabled).
    __magic_name__ : Optional[int] = False
    __magic_name__ : List[Any] = False

    def lowerCAmelCase_ ( self , a__ , a__ , a__=False ) -> Union[str, Any]:
        """Shared input-prep hook; for pretraining-mapped model classes it adds
        an all-zero ``next_sentence_label`` tensor when labels are requested.

        NOTE(review): the three duplicate ``a__`` parameters are a SyntaxError,
        and the body reads ``return_labels``/``model_class``/``inputs_dict``
        which this garbled signature no longer binds — restore the original
        parameter names (likely ``inputs_dict, model_class, return_labels``).
        """
        A_ = super()._prepare_for_class(a__ , a__ , return_labels=a__ )
        if return_labels:
            if model_class in get_values(a__ ):
                A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
        return inputs_dict
class __lowercase ( A ):
    # NOTE(review): identifier garbling throughout this class — the base ``A``
    # is undefined here, every method is named ``lowerCAmelCase_`` (each
    # definition shadows the previous one), every local assignment target was
    # collapsed to ``A_`` while right-hand sides still read the intended
    # locals, and multi-parameter signatures repeat ``a__`` (a SyntaxError).
    # This is the flattened remains of a MobileBERT model-tester/test-case
    # pair; restore the original identifiers before running.

    def __init__( self , a__ , a__=1_3 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=9_9 , a__=3_2 , a__=3_2 , a__=2 , a__=4 , a__=3_7 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=1_6 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ) -> List[str]:
        """Record the tester hyper-parameters (batch size, model dims, ...)."""
        A_ = parent
        A_ = batch_size
        A_ = seq_length
        A_ = is_training
        A_ = use_input_mask
        A_ = use_token_type_ids
        A_ = use_labels
        A_ = vocab_size
        A_ = hidden_size
        A_ = num_hidden_layers
        A_ = num_attention_heads
        A_ = intermediate_size
        A_ = hidden_act
        A_ = hidden_dropout_prob
        A_ = attention_probs_dropout_prob
        A_ = max_position_embeddings
        A_ = type_vocab_size
        A_ = type_sequence_label_size
        A_ = initializer_range
        A_ = num_labels
        A_ = num_choices
        A_ = scope
        A_ = embedding_size

    def lowerCAmelCase_ ( self ) -> Tuple:
        """Build a MobileBertConfig plus random input/mask/label tensors."""
        A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A_ = None
        if self.use_input_mask:
            A_ = random_attention_mask([self.batch_size, self.seq_length] )
        A_ = None
        if self.use_token_type_ids:
            A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A_ = None
        A_ = None
        A_ = None
        if self.use_labels:
            A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A_ = ids_tensor([self.batch_size] , self.num_choices )
        A_ = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict:
        """Forward TFMobileBertModel (dict, list and keyword input styles) and
        check hidden-state and pooler output shapes."""
        A_ = TFMobileBertModel(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        A_ = [input_ids, input_mask]
        A_ = model(a__ )
        A_ = model(a__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
        """Forward the masked-LM head and check logits shape."""
        A_ = TFMobileBertForMaskedLM(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> int:
        """Forward the next-sentence-prediction head and check logits shape."""
        A_ = TFMobileBertForNextSentencePrediction(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[str]:
        """Forward the pretraining heads and check both logit shapes."""
        A_ = TFMobileBertForPreTraining(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[Any]:
        """Forward the sequence-classification head and check logits shape."""
        A_ = self.num_labels
        A_ = TFMobileBertForSequenceClassification(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
        """Forward the multiple-choice head on choice-tiled inputs and check
        the logits shape."""
        A_ = self.num_choices
        A_ = TFMobileBertForMultipleChoice(config=a__ )
        A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
        A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
        A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
        A_ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        A_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
        """Forward the token-classification head and check logits shape."""
        A_ = self.num_labels
        A_ = TFMobileBertForTokenClassification(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
        """Forward the question-answering head and check start/end logits."""
        A_ = TFMobileBertForQuestionAnswering(config=a__ )
        A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        A_ = model(a__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase_ ( self ) -> Any:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        A_ = self.prepare_config_and_inputs()
        (
            (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) ,
        ) = config_and_inputs
        A_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict

    def lowerCAmelCase_ ( self ) -> str:
        """Set up the model tester and the ConfigTester."""
        A_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
        A_ = ConfigTester(self , config_class=a__ , hidden_size=3_7 )

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self ) -> int:
        """Exercise the base-model shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*a__ )

    def lowerCAmelCase_ ( self ) -> Tuple:
        """Exercise the masked-LM shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*a__ )

    def lowerCAmelCase_ ( self ) -> List[str]:
        """Exercise the multiple-choice shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a__ )

    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        """Exercise the next-sentence-prediction shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a__ )

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        """Exercise the pretraining shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*a__ )

    def lowerCAmelCase_ ( self ) -> Optional[Any]:
        """Exercise the question-answering shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*a__ )

    def lowerCAmelCase_ ( self ) -> List[Any]:
        """Exercise the sequence-classification shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a__ )

    def lowerCAmelCase_ ( self ) -> List[Any]:
        """Exercise the token-classification shape check."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*a__ )

    @slow
    def lowerCAmelCase_ ( self ) -> int:
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            A_ = TFMobileBertModel.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
@require_tf
class __lowercase(unittest.TestCase):
    """Slow integration test pinning TFMobileBertForPreTraining logits.

    BUG FIX(review): removed dataset residue ("| 141 | 0 |") fused onto the
    final source line, and restored the assignment targets / argument names
    that identifier garbling collapsed into ``A_``/``a__`` (the method
    previously raised NameError on the undefined names ``model`` and ``a__``).
    """

    @slow
    def lowerCAmelCase_(self) -> Optional[Any]:
        """Run a 1x6 token batch through the pretrained checkpoint and compare
        a 3x3 logits slice against hard-coded reference values."""
        model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
                    [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
                    [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def UpperCamelCase_(name, val, spaces=0):
    """Recursively pretty-print the structure of a (possibly nested) checkpoint.

    Args:
        name: label of the current entry, or None for the (unlabeled) root.
        val: a dict (recursed into), a torch.Tensor (its size is printed), or
            any other value (printed directly).
        spaces: current indentation depth, advanced by 2 per nesting level.

    BUG FIX: the original declared three duplicate ``__magic_name__``
    parameters (a SyntaxError), assigned into throwaway locals, and recursed
    via the undefined name ``recursive_print``; parameter/local names restored
    and the recursion retargeted at this function.
    """
    # Format the label: leading dots proportional to depth, value column
    # padded to a fixed width of 50.
    if name is None:
        msg = None
    else:
        fmt = '.' * max(0, spaces - 2) + '# {:' + str(50 - spaces) + 's}'
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            UpperCamelCase_(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ':', val.size())
    else:
        print(msg, ':', val)
def UpperCamelCase_(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias from Megatron layout to HF GPT-2 layout.

    Megatron checkpoint versions store the fused Q/K/V dimension in different
    orders; this reshapes, transposes, and flattens back so that downstream
    code can treat the leading dimension uniformly. Parameters with a
    checkpoint version outside both branches are returned value-unchanged.

    Args:
        param: the fused tensor; dim 0 is num_heads*num_splits*hidden_size.
        checkpoint_version: Megatron checkpoint format version.
        num_splits: number of fused projections (3 for Q, K, V).
        num_heads: attention head count.
        hidden_size: per-head hidden size.

    Returns:
        The permuted tensor with the same shape as the input.

    BUG FIX: the original declared five duplicate ``__magic_name__``
    parameters (a SyntaxError) and wrote every intermediate into throwaway
    locals; names restored from the right-hand sides.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Dict ):
    """Convert a Megatron-LM GPT-2 checkpoint state_dict into HF GPT-2 layout.

    NOTE(review): identifier garbling — the three parameters share the
    duplicate name ``__magic_name__`` (a SyntaxError) and every assignment
    target was collapsed to ``_lowerCAmelCase``, while the right-hand sides
    still read the intended locals (``input_state_dict``, ``config``,
    ``ds_args``, ``output_state_dict`` ...). The original signature was
    presumably ``(args, input_state_dict, config)`` — restore before running.
    """
    # Intended: output_state_dict = {}
    _lowerCAmelCase :List[str] = {}
    # old versions did not store training args
    _lowerCAmelCase :str = input_state_dict.get('args' , __magic_name__ )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        _lowerCAmelCase :Optional[int] = ds_args.padded_vocab_size
        _lowerCAmelCase :Union[str, Any] = ds_args.max_position_embeddings
        _lowerCAmelCase :Union[str, Any] = ds_args.hidden_size
        _lowerCAmelCase :Tuple = ds_args.num_layers
        _lowerCAmelCase :List[str] = ds_args.num_attention_heads
        _lowerCAmelCase :Optional[Any] = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    _lowerCAmelCase :Any = config.n_head
    # The hidden_size per head.
    _lowerCAmelCase :Union[str, Any] = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        _lowerCAmelCase :Any = input_state_dict['checkpoint_version']
    else:
        _lowerCAmelCase :Optional[int] = 0.0
    # The model.
    _lowerCAmelCase :Any = input_state_dict['model']
    # The language model.
    _lowerCAmelCase :Dict = model['language_model']
    # The embeddings.
    _lowerCAmelCase :Optional[int] = lm['embedding']
    # The word embeddings.
    _lowerCAmelCase :Any = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    _lowerCAmelCase :Union[str, Any] = word_embeddings[: config.vocab_size, :]
    _lowerCAmelCase :List[str] = word_embeddings
    # The position embeddings.
    _lowerCAmelCase :Dict = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    _lowerCAmelCase :Union[str, Any] = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
    # Store the position embeddings.
    _lowerCAmelCase :Optional[int] = pos_embeddings
    # The transformer.
    _lowerCAmelCase :List[Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
    # The regex to extract layer names.
    _lowerCAmelCase :str = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
    # The simple map of names for "automated" rules.
    _lowerCAmelCase :List[Any] = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        _lowerCAmelCase :Dict = layer_re.match(__magic_name__ )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        _lowerCAmelCase :Optional[Any] = int(m.group(1 ) )
        # The name of the operation.
        _lowerCAmelCase :int = m.group(2 )
        # Is it a weight or a bias?
        _lowerCAmelCase :List[Any] = m.group(3 )
        # The name of the layer.
        _lowerCAmelCase :List[Any] = f"""transformer.h.{layer_idx}"""
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            _lowerCAmelCase :List[Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            _lowerCAmelCase :Optional[int] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            _lowerCAmelCase :Tuple = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
                1 , 1 , __magic_name__ , __magic_name__ )
            _lowerCAmelCase :Tuple = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            _lowerCAmelCase :List[str] = torch.tensor(-1e4 , dtype=torch.floataa )
            _lowerCAmelCase :int = masked_bias
            _lowerCAmelCase :Optional[Any] = fix_query_key_value_ordering(__magic_name__ , __magic_name__ , 3 , __magic_name__ , __magic_name__ )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            _lowerCAmelCase :Union[str, Any] = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            _lowerCAmelCase :int = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            _lowerCAmelCase :Dict = fix_query_key_value_ordering(__magic_name__ , __magic_name__ , 3 , __magic_name__ , __magic_name__ )
            # Store. No change of shape.
            _lowerCAmelCase :Optional[Any] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            _lowerCAmelCase :int = megatron_to_transformers[op_name]
            _lowerCAmelCase :Tuple = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            _lowerCAmelCase :Optional[Any] = megatron_to_transformers[op_name]
            _lowerCAmelCase :Any = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    _lowerCAmelCase :str = transformer['final_layernorm.weight']
    _lowerCAmelCase :Dict = transformer['final_layernorm.bias']
    # For LM head, transformers' wants the matrix to weight embeddings.
    _lowerCAmelCase :Optional[Any] = word_embeddings
    # It should be done!
    return output_state_dict
def UpperCamelCase_( ):
    """CLI entry point: load a Megatron-LM GPT-2 checkpoint, convert it, and
    save config + tokenizer + weights in HF transformers layout.

    NOTE(review): identifier garbling — every assignment target reads
    ``_lowerCAmelCase`` while later lines use the intended locals (``parser``,
    ``args``, ``input_state_dict``, ``config`` ...); the helpers invoked here
    (``recursive_print``, ``convert_megatron_checkpoint``) as well as the
    ``main`` in the guard below are all defined as ``UpperCamelCase_`` in this
    file, and several call arguments were replaced by the undefined
    ``__magic_name__``. Restore the original names before running.
    """
    _lowerCAmelCase :List[Any] = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure' , action='store_true' )
    parser.add_argument(
        'path_to_checkpoint' , type=__magic_name__ , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
    parser.add_argument(
        '--config_file' , default='' , type=__magic_name__ , help='An optional config json file describing the pre-trained model.' , )
    _lowerCAmelCase :Tuple = parser.parse_args()
    # Extract the basename.
    _lowerCAmelCase :Dict = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                _lowerCAmelCase :Dict = torch.load(__magic_name__ , map_location='cpu' )
    else:
        _lowerCAmelCase :Union[str, Any] = torch.load(args.path_to_checkpoint , map_location='cpu' )
    _lowerCAmelCase :Tuple = input_state_dict.get('args' , __magic_name__ )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                _lowerCAmelCase :Optional[Any] = 'gelu_fast'
            elif ds_args.openai_gelu:
                _lowerCAmelCase :Tuple = 'gelu_new'
            else:
                _lowerCAmelCase :List[Any] = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            _lowerCAmelCase :Union[str, Any] = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        _lowerCAmelCase :List[str] = GPTaConfig(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__magic_name__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=__magic_name__ , summary_activation=__magic_name__ , summary_proj_to_labels=__magic_name__ , summary_first_dropout=0.1 , scale_attn_weights=__magic_name__ , use_cache=__magic_name__ , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        _lowerCAmelCase :Optional[int] = GPTaConfig.from_json_file(args.config_file )
    _lowerCAmelCase :Any = ['GPT2LMHeadModel']
    # Convert.
    print('Converting' )
    _lowerCAmelCase :str = convert_megatron_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(__magic_name__ , __magic_name__ )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        _lowerCAmelCase :str = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            _lowerCAmelCase :Optional[int] = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            _lowerCAmelCase :Union[str, Any] = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
    else:
        _lowerCAmelCase :Tuple = 'gpt2'
    _lowerCAmelCase :Optional[int] = AutoTokenizer.from_pretrained(__magic_name__ )
    _lowerCAmelCase :List[Any] = type(__magic_name__ ).__name__
    _lowerCAmelCase :List[str] = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(__magic_name__ )
    # Save tokenizer based on args
    print(f"""Adding {tokenizer_class} tokenizer files""" )
    tokenizer.save_pretrained(__magic_name__ )
    # Store the state_dict to file.
    _lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , 'pytorch_model.bin' )
    print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
    torch.save(__magic_name__ , __magic_name__ )


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import scaffolding for the data2vec model family.
# BUG FIX: removed dataset residue ("| 382 | 1 |") fused onto the final
# source line, which made the module a SyntaxError.
# NOTE(review): identifier garbling — every module-level name below was
# collapsed to ``a``: the import-structure dict and the three backend lists
# keep overwriting one another, and the final _LazyModule call references an
# undefined ``_import_structure``. Restore the original names (a single
# ``_import_structure`` dict extended per backend) before use; the
# ``dataavec``/``DataaVec`` spellings are also garbled module/class names.
a = {
    """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
    """configuration_data2vec_text""": [
        """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecTextConfig""",
        """Data2VecTextOnnxConfig""",
    ],
    """configuration_data2vec_vision""": [
        """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecVisionConfig""",
        """Data2VecVisionOnnxConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch-backed symbols, only advertised when torch is importable.
    a = [
        """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecAudioForAudioFrameClassification""",
        """Data2VecAudioForCTC""",
        """Data2VecAudioForSequenceClassification""",
        """Data2VecAudioForXVector""",
        """Data2VecAudioModel""",
        """Data2VecAudioPreTrainedModel""",
    ]
    a = [
        """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecTextForCausalLM""",
        """Data2VecTextForMaskedLM""",
        """Data2VecTextForMultipleChoice""",
        """Data2VecTextForQuestionAnswering""",
        """Data2VecTextForSequenceClassification""",
        """Data2VecTextForTokenClassification""",
        """Data2VecTextModel""",
        """Data2VecTextPreTrainedModel""",
    ]
    a = [
        """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecVisionForImageClassification""",
        """Data2VecVisionForMaskedImageModeling""",
        """Data2VecVisionForSemanticSegmentation""",
        """Data2VecVisionModel""",
        """Data2VecVisionPreTrainedModel""",
    ]

# TensorFlow-backed symbols.
if is_tf_available():
    a = [
        """TFData2VecVisionForImageClassification""",
        """TFData2VecVisionForSemanticSegmentation""",
        """TFData2VecVisionModel""",
        """TFData2VecVisionPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring the lazy structure above.
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations

from collections.abc import Callable

# A dense matrix (rows of numbers); also used for n x 1 column vectors.
# NOTE(review): garbling renamed the alias to ``_lowercase`` while the
# functions below annotate with ``Matrix``; the ``__future__`` import makes
# annotations lazy strings, so the mismatch is latent rather than an
# immediate NameError — restore the original alias name.
_lowercase = list[list[float | int]]
def solve(matrix: list[list[float | int]], vector: list[list[float | int]]) -> list[list[float | int]]:
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial pivoting.

    `vector` is a column (list of 1-element rows); the solution is returned in
    the same shape, rounded to 10 decimals to hide float noise.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: list[list[float | int]] = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the remaining row with the largest |entry|
        # in this column.
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate this column from all rows below the pivot.
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1

    # Back substitution.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """Fit a degree-(n-1) polynomial through (1, y1), (2, y2), ... and return it.

    Coefficients come from the Gaussian-elimination solver; they are rounded
    to the nearest integer in the returned callable because this problem
    (Project Euler 101) works over integer-valued polynomials.
    """
    size = len(data_points)
    # Vandermonde-style system: row x has columns (x+1)**(size-col-1).
    coeff_matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            coeff_matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(coeff_matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """Evaluate the generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimal polynomials fitted
    to each prefix of func(1), func(2), ... (Project Euler problem 101)."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fit first disagrees with func; that value is
        # this prefix's FIT.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # Print the Project Euler 101 answer.
    print(f"{solution() = }")
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def UpperCamelCase ( snake_case__ , snake_case__):
return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Return the source of `object_name` (dotted path) from the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code: str) -> str:
    """Return the leading whitespace of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Reformat `code` with black (plus docstring styling) using repo settings."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # black only formats top-level code, so wrap indented code in a class.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False) -> list:
    """Check `# Copied from diffusers....` blocks in `filename` against their source.

    Returns a list of `[object_name, start_line]` for stale copies; with
    `overwrite=True` the file is rewritten so the copies match again.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    # Also replace lower/upper-cased variants of the names.
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every python file under DIFFUSERS_PATH for stale `# Copied from` blocks.

    Raises an Exception listing all inconsistencies unless `overwrite=True`,
    in which case the offending files are fixed in place.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) of a Friedmann universe.

    H = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL), where the
    curvature density is Ok = 1 - (Om + Or + OL).

    Raises:
        ValueError: if any input is negative, or any relative density > 1.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
from ..utils import DummyObject, requires_backends
class __snake_case(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/torchsde are missing.

    NOTE(review): the metaclass was the undefined name `a`; `DummyObject` is
    imported above for exactly this purpose — confirm. The two classmethod
    names were garbled duplicates; `from_config`/`from_pretrained` follow the
    standard dummy-object pattern — confirm against the original.
    """

    # Backends required before the real class can be used.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current share price for `symbol` from Yahoo Finance India."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # CSS class of the price container on the quote page.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    # Print the live price for a handful of well-known tickers.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__(nn.Module):
    """Two parallel Transformer2D models whose residual outputs are blended.

    `encoder_hidden_states` concatenates two condition sequences; each slice
    is routed to one of the two transformers and the residuals are mixed with
    `mix_ratio`.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on their condition slice and mix the residuals."""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Public structure of the lazily-loaded `encodec` module.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only modeling classes.
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Public structure of the lazily-loaded `blip_2` module.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only modeling classes.
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build an n-qubit QFT circuit and return counts from 10000 simulator shots.

    Raises:
        TypeError: if `number_of_qubits` is a string.
        ValueError: if it is non-positive, non-integral, or greater than 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the current target qubit...
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # ...then the controlled-phase rotations from the remaining qubits.
            # NOTE(review): control/target args reconstructed as (j, counter) —
            # the originals were garbled; confirm against the reference.
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order to produce the conventional QFT output ordering.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand

# Training requires at least one deep-learning backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")

# TF training parameters
# NOTE(review): both flags bind the same (garbled) name `__lowerCamelCase`, so
# the first binding is dead; originally these were presumably two distinct
# flags (e.g. XLA / AMP toggles) — confirm against the upstream file.
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( __lowerCamelCase : Namespace ):
    """Factory: instantiate the train command from parsed CLI arguments."""
    return TrainCommand(__lowerCamelCase )
class TrainCommand(BaseTransformersCLICommand):
    """`transformers-cli train`: train a pipeline on a CSV dataset.

    NOTE(review): renamed from the garbled `UpperCAmelCase` to `TrainCommand`,
    the name the factory function above already instantiates; the base class
    `A_` was undefined and `BaseTransformersCLICommand` is imported above for
    this purpose.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `train` subcommand and its arguments on `parser`."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        # `UpperCamelCase` is the (garbled) factory function defined above.
        train_parser.set_defaults(func=UpperCamelCase)

    def __init__(self, args: Namespace):
        """Load the pipeline and datasets described by the parsed arguments."""
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the backend-specific training routine."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this command.
        raise NotImplementedError

    def run_tf(self):
        """Fit the TF pipeline on the loaded datasets and save the result."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Contents and name of the dummy file exercised by the fixtures/tests below.
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session fixture: write FILE_CONTENT zstd-compressed and return its path."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Fixture: create the dummy file inside the mock `tmp://` filesystem."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True yields the decompressed content."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction lands in the default directory or the monkeypatched custom one."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path returns existing local paths (absolute and relative) unchanged."""
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path raises for missing local files.

    NOTE(review): the expected exception type was garbled away;
    FileNotFoundError is the reconstruction — confirm against datasets' tests.
    """
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache resolves `tmp://` fsspec URLs to the local file content."""
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    """Offline mode turns remote cached_path calls into OfflineModeIsEnabled."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    """Offline mode blocks http_get / http_head."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    """Offline mode blocks ftp_get / ftp_head."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    """Offline mode blocks fsspec_get / fsspec_head."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
'''simple docstring'''
from __future__ import annotations
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase ):
"""simple docstring"""
__A : Dict = order
# a_{0} ... a_{k}
__A : str = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__A : Any = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__A : Optional[int] = [0.0] * self.order
# y[n-1] ... y[n-k]
__A : Dict = [0.0] * self.order
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if len(__lowercase ) < self.order:
__A : List[Any] = [1.0, *a_coeffs]
if len(__lowercase ) != self.order + 1:
__A : Tuple = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(__lowercase )}"""
)
raise ValueError(__lowercase )
if len(__lowercase ) != self.order + 1:
__A : List[Any] = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(__lowercase )}"""
)
raise ValueError(__lowercase )
__A : Optional[int] = a_coeffs
__A : Optional[int] = b_coeffs
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : Union[str, Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__A : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__A : List[str] = self.input_history[:-1]
__A : Optional[int] = self.output_history[:-1]
__A : Optional[int] = sample
__A : Dict = result
return result
| 540 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list:
'''simple docstring'''
_lowerCamelCase : Tuple = []
_lowerCamelCase, _lowerCamelCase : List[Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_lowerCamelCase : int = result + left + right
return input_list
def lowerCamelCase_( _lowerCamelCase ) -> list:
    """Iterative (bottom-up) merge sort; returns a new sorted list.

    The input sequence is left untouched: sorting happens on a shallow copy
    that is merged in runs of doubling width ``p``.
    """
    if len(_lowerCamelCase ) <= 1:
        return _lowerCamelCase

    def _merge(items: list, low: int, mid: int, high: int) -> list:
        # Merge the sorted runs items[low:mid] and items[mid:high + 1] in place.
        merged = []
        left, right = items[low:mid], items[mid : high + 1]
        while left and right:
            merged.append((left if left[0] <= right[0] else right).pop(0))
        items[low : high + 1] = merged + left + right
        return items

    input_list = list(_lowerCamelCase)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of a single run
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = _merge(input_list, low, mid, high)
        # final merge of the last two parts
        if p * 2 >= len(input_list):
            low = i
            input_list = _merge(input_list, 0, low, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(lowerCamelCase_(unsorted))
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
# Emit INFO-level messages during checkpoint conversion.
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)  # module logger (not referenced in the visible code)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False) -> list:
    """Build the list of (old_key, new_key) pairs mapping original ViLT
    checkpoint names to the Hugging Face ``vilt.*`` naming scheme.

    The head-specific keys depend on which model flavour is being converted
    (VQA, NLVR2, or none for retrieval / MLM).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))
    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ] )
    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ] )
    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ] )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ] )
    return rename_keys
def read_in_q_k_v(state_dict, config) -> None:
    """Split each layer's fused qkv projection into separate query/key/value
    entries under the ``vilt.`` prefix (modifies ``state_dict`` in place).
    """
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict) -> None:
    """Drop the (unused) classification-head weights from the checkpoint.

    Modifies ``state_dict`` in place; missing keys are tolerated.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # pop with a default so an already-stripped checkpoint does not raise
        state_dict.pop(k, None)
def rename_key(dct, old, new) -> None:
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path) -> None:
    """Download an original ViLT checkpoint, rename its weights into the
    Hugging Face layout, sanity-check a forward pass, and save model +
    processor to ``pytorch_dump_folder_path``.

    NOTE(review): local-variable targets were reconstructed from their later
    uses; the original obfuscated source bound everything to dead locals.
    """
    # Define configuration and the head-specific model for this checkpoint.
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # The decoder bias is tied at load time, hence strict=False.
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)  # module-level logger (not referenced in the visible code)
class SCREAMING_SNAKE_CASE ( BaseImageProcessor):
    """Image processor applying optional resize, center-crop, rescale and
    normalization steps and returning a :class:`BatchFeature`.

    NOTE(review): parameter, attribute and method names were reconstructed
    from the method bodies and the ``self.resize``/``self.center_crop``/
    ``self.rescale``/``self.normalize`` call sites; the original obfuscated
    signatures reused one name for every argument (a SyntaxError in Python).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BICUBIC,
        do_center_crop = True,
        do_rescale = True,
        rescale_factor = 1 / 255,
        crop_size = None,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        # Fall back to the ImageNet defaults when no stats are supplied.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample = PILImageResampling.BILINEAR, data_format = None, **kwargs) -> np.ndarray:
        """Resize to a {"height", "width"} dict or by "shortest_edge"."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format = None, **kwargs) -> np.ndarray:
        """Center-crop to ``size`` (must contain "height" and "width")."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
        """Normalize with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline on one image or a batch of images;
        per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 58 | '''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( ModelMixin , ConfigMixin):
    """T5-style spectrogram decoder with FiLM conditioning on the diffusion
    time step (stack of :class:`DecoderLayer` blocks).

    NOTE(review): attribute names were reconstructed from their reads in
    ``forward``; the original obfuscated ``__init__`` bound dead locals.
    """

    @register_to_config
    def __init__(
        self,
        input_dims = 128,
        targets_length = 256,
        max_decoder_noise_time = 2_000.0,
        d_model = 768,
        num_layers = 12,
        num_heads = 12,
        d_kv = 64,
        d_ff = 2048,
        dropout_rate = 0.1,
    ) -> None:
        super().__init__()
        # Maps the sinusoidal time-step embedding to a 4*d_model FiLM embedding.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        # presumably the positional table is frozen, not learned — TODO confirm
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer product of 1-D masks -> (batch, 1, q_len, k_len) cross-attention mask."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """T5 decoder block: FiLM-conditioned self-attention, cross-attention,
    then a FiLM-conditioned feed-forward layer (held in ``self.layer``)."""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6) -> None:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            # Convert the {0, 1} mask into an additive bias (-1e10 on masked slots).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention block with optional FiLM conditioning."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate) -> None:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block with residual connection.
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention block over external key/value states."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon) -> None:
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        # Residual connection around the attended output.
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward block with optional FiLM conditioning."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon) -> None:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        # Residual connection around the feed-forward output.
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5 gated feed-forward: GELU(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, d_model, d_ff, dropout_rate) -> None:
        super().__init__()
        # Two parallel input projections: one gated through GELU, one linear.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale-only, no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """GELU activation using the tanh approximation."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: predicts per-channel (scale, shift) from a conditioning
    embedding and applies ``x * (1 + scale) + shift``."""

    def __init__(self, in_features, out_features) -> None:
        super().__init__()
        # Single projection producing both halves: [scale | shift].
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 58 | 1 |
import math
def __lowerCAmelCase ( initial_intensity , angle ) -> float:
    """Malus's law: transmitted intensity I = I0 * cos^2(theta).

    ``angle`` is in degrees; raises ``ValueError`` for a negative intensity
    or an angle outside [0, 360].
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod(name="""malus_law""")
| 221 | import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Module logger; the verification helpers below report via this name.
logger = get_logger(__name__)
class A_ ( enum.Enum ):
    """How much verification to run on downloaded/processed data splits."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Raises ``ExpectedMoreDownloadedFiles`` / ``UnexpectedDownloadedFile`` on
    key mismatches and ``NonMatchingChecksumError`` on value mismatches.
    Skips verification entirely when ``expected_checksums`` is None.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not listed in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were never recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A recorded split's size differs from the expected one."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded dataset splits against the expected ones.

    Raises ``ExpectedMoreSplits`` / ``UnexpectedSplits`` on name mismatches
    and ``NonMatchingSplitsSizesError`` when ``num_examples`` differs.
    Skips verification entirely when ``expected_splits`` is None.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at ``path``.

    The checksum is a SHA-256 hex digest, or None when ``record_checksum``
    is False.
    """
    if record_checksum:
        # Local import: the module-level `shaaaa` alias is broken (hashlib has
        # no such attribute), so use the real sha256 constructor directly.
        from hashlib import sha256
        m = sha256()
        with open(path, "rb") as f:
            # Stream in 1 MiB chunks so large files are not read into memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """True iff ``dataset_size`` fits under the configured in-memory cap.

    Returns False when either the size or ``config.IN_MEMORY_MAX_SIZE`` is
    falsy (0 / None), i.e. when the cap is disabled or the size is unknown.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 197 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-module boilerplate: expose ByT5Tokenizer without importing it eagerly.
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 172 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _A :
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] )-> Dict:
pass
def lowerCAmelCase__ ( UpperCAmelCase ):
    """No-op helper: accepts a single argument and always returns ``None``."""
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
    """Integration tests for the ``document-question-answering`` pipeline.

    Covers LayoutLMv2 (tiny and full DocVQA checkpoints), impira/layoutlm
    (including the ``image=None`` + ``word_boxes`` path) and Donut.

    NOTE(review): identifiers look machine-mangled — several ``def``
    signatures repeat the same parameter name (a SyntaxError in Python), and
    ``@require_detectrona`` is presumably ``require_detectron2`` — confirm
    before running.
    """

    # Architecture mapping enumerated by the pipeline test harness.
    _lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] )-> Optional[int]:
        # Build a pipeline and three example payloads: URL image, pre-loaded
        # image, and pre-computed OCR word boxes.
        snake_case__ : Union[str, Any] = pipeline(
            """document-question-answering""" , model=lowerCamelCase , tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
        snake_case__ : int = INVOICE_URL
        # Run tesseract once so word/box pairs can be handed to the pipeline directly.
        snake_case__ : List[Any] = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        snake_case__ : Dict = """What is the placebo?"""
        snake_case__ : int = [
            {
                """image""": load_image(lowerCamelCase ),
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
                """word_boxes""": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def __lowerCAmelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] )-> Union[str, Any]:
        # Smoke-test: each of the 3 examples must yield top_k=2 answers with the expected keys.
        snake_case__ : List[Any] = dqa_pipeline(lowerCamelCase , top_k=2 )
        self.assertEqual(
            lowerCamelCase , [
                [
                    {"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
                    {"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : int )-> List[Any]:
        # Tiny random LayoutLMv2 checkpoint: verifies output structure, not answer quality.
        snake_case__ : List[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
        snake_case__ : int = INVOICE_URL
        snake_case__ : List[Any] = """How many cats are there?"""
        snake_case__ : Optional[int] = [
            {"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
            {"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
        ]
        snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
        snake_case__ : str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        snake_case__ : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ : Union[str, Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(lowerCamelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        snake_case__ : Optional[int] = []
        snake_case__ : List[Any] = []
        snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , words=lowerCamelCase , boxes=lowerCamelCase , top_k=2 )
        self.assertEqual(lowerCamelCase , [] )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : int )-> Any:
        # Full LayoutLMv2 DocVQA checkpoint at a pinned revision; exact scores asserted.
        snake_case__ : List[str] = pipeline(
            """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
        snake_case__ : List[Any] = INVOICE_URL
        snake_case__ : Optional[Any] = """What is the invoice number?"""
        snake_case__ : Any = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : Dict = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def __lowerCAmelCase ( self : List[Any] )-> Any:
        # Same checkpoint constrained to max_seq_len=50, which changes the top answers.
        snake_case__ : Optional[Any] = pipeline(
            """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
        snake_case__ : Dict = INVOICE_URL
        snake_case__ : Tuple = """What is the invoice number?"""
        snake_case__ : Optional[Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : List[Any] = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
                    {"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : List[str] )-> Dict:
        # impira/layoutlm-document-qa; also verifies the image=None + word_boxes path.
        snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(
            """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
        snake_case__ : Tuple = pipeline(
            """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , )
        snake_case__ : Optional[int] = INVOICE_URL
        snake_case__ : Union[str, Any] = """What is the invoice number?"""
        snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )
        snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )
        snake_case__ : int = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
                ]
            ]
            * 2 , )
        snake_case__ : Tuple = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        # This model should also work if `image` is set to None
        snake_case__ : Dict = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def __lowerCAmelCase ( self : int )-> str:
        # Same impira checkpoint with max_seq_len=50.
        snake_case__ : Dict = AutoTokenizer.from_pretrained(
            """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
        snake_case__ : List[Any] = pipeline(
            """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , max_seq_len=50 , )
        snake_case__ : Any = INVOICE_URL
        snake_case__ : List[Any] = """What is the invoice number?"""
        snake_case__ : int = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )
        snake_case__ : str = dqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                [
                    {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                    {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
                ]
            ]
            * 2 , )
        snake_case__ : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
        # This model should also work if `image` is set to None
        snake_case__ : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCamelCase , decimals=4 ) , [
                {"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
                {"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
            ] , )

    @slow
    @require_torch
    def __lowerCAmelCase ( self : int )-> Tuple:
        # Donut is generative: only the answer text is compared, no span scores.
        snake_case__ : Optional[int] = pipeline(
            """document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
        snake_case__ : str = INVOICE_URL
        snake_case__ : Tuple = """What is the invoice number?"""
        snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
        self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )

    @require_tf
    @unittest.skip("""Document question answering not implemented in TF""" )
    def __lowerCAmelCase ( self : int )-> List[Any]:
        pass
| 172 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Probe for the optional `cookiecutter` dependency.
# Fix: the availability flag was assigned to the mangled name `lowerCAmelCase__`
# and then immediately clobbered by the logger assignment below, while
# `AddNewModelCommand.run` reads `_has_cookiecutter` — which was never defined.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

lowerCAmelCase__ = logging.get_logger(__name__)  # pylint: disable=invalid-name
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
    """Factory used with ``argparse`` ``set_defaults(func=...)``: build the
    ``AddNewModelCommand`` from the parsed CLI namespace.

    Fix: the body referenced an undefined name ``args`` although the
    parameter is ``lowerCamelCase_`` (NameError at call time).
    """
    return AddNewModelCommand(lowerCamelCase_.testing , lowerCamelCase_.testing_file , path=lowerCamelCase_.path)
class __lowercase (__UpperCamelCase ):
    """`transformers-cli add-new-model`: scaffold a new model from the
    cookiecutter template (deprecated in favour of ``add-new-model-like``).

    NOTE(review): identifiers look machine-mangled — ``__init__`` repeats the
    parameter name ``UpperCAmelCase_`` (a SyntaxError in Python) and the
    methods read names (``parser``, ``directory``, ``model_dir``,
    ``lowercase_model_name``, ``SCREAMING_SNAKE_CASE_`` …) that are never
    assigned under those names. Restore the original identifiers before use.
    """

    @staticmethod
    def __UpperCamelCase ( UpperCAmelCase_ : List[Any]):
        # Register the `add-new-model` sub-command and its CLI flags.
        UpperCamelCase__ : Dict = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file' , type=SCREAMING_SNAKE_CASE_ , help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path' , type=SCREAMING_SNAKE_CASE_ , help='Path to cookiecutter. Should only be used for testing purposes.')
        add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE_)

    def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=None , *UpperCAmelCase_ : Tuple):
        # Store testing mode, the optional test config file and the optional template path.
        UpperCamelCase__ : Any = testing
        UpperCamelCase__ : Optional[int] = testing_file
        UpperCamelCase__ : Any = path

    def __UpperCamelCase ( self : str):
        # Entry point: run cookiecutter, then distribute the generated files
        # into the transformers source tree.
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.')
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n')
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        UpperCamelCase__ : str = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(SCREAMING_SNAKE_CASE_) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.')
        UpperCamelCase__ : str = (
            Path(SCREAMING_SNAKE_CASE_).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        UpperCamelCase__ : Optional[Any] = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(SCREAMING_SNAKE_CASE_))
        else:
            with open(self._testing_file , 'r') as configuration_file:
                UpperCamelCase__ : Dict = json.load(SCREAMING_SNAKE_CASE_)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path) , no_input=SCREAMING_SNAKE_CASE_ , extra_context=SCREAMING_SNAKE_CASE_ , )
        UpperCamelCase__ : Union[str, Any] = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json' , 'r') as configuration_file:
            UpperCamelCase__ : str = json.load(SCREAMING_SNAKE_CASE_)
        UpperCamelCase__ : Optional[Any] = configuration['lowercase_modelname']
        UpperCamelCase__ : List[str] = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F'{directory}/configuration.json')
        # Which frameworks were requested in the cookiecutter config.
        UpperCamelCase__ : List[Any] = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase__ : int = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase__ : str = 'Flax' in generate_tensorflow_pytorch_and_flax
        UpperCamelCase__ : int = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_)
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=SCREAMING_SNAKE_CASE_)
        # Tests require submodules as they have parent imports
        with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w'):
            pass
        shutil.move(
            F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
        shutil.move(
            F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )

        def remove_copy_lines(UpperCAmelCase_ : str):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(SCREAMING_SNAKE_CASE_ , 'r') as f:
                UpperCamelCase__ : List[Any] = f.readlines()
            with open(SCREAMING_SNAKE_CASE_ , 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(SCREAMING_SNAKE_CASE_)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py')
            shutil.move(
                F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
            shutil.move(
                F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
        else:
            os.remove(F'{directory}/modeling_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py')
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py')
            shutil.move(
                F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
            shutil.move(
                F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
        else:
            os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py')
        if output_flax:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py')
            shutil.move(
                F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
            shutil.move(
                F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
        else:
            os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py')
        shutil.move(
            F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
        shutil.move(
            F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
        shutil.move(
            F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str):
            # Splice `lines_to_copy` into a file right below an anchor line,
            # atomically via a temp file.
            # Create temp file
            UpperCamelCase__, UpperCamelCase__ : Optional[Any] = mkstemp()
            UpperCamelCase__ : Optional[Any] = False
            with fdopen(SCREAMING_SNAKE_CASE_ , 'w') as new_file:
                with open(SCREAMING_SNAKE_CASE_) as old_file:
                    for line in old_file:
                        new_file.write(SCREAMING_SNAKE_CASE_)
                        if line_to_copy_below in line:
                            UpperCamelCase__ : List[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(SCREAMING_SNAKE_CASE_)
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.')
            # Copy the file permissions from the old file to the new file
            copymode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
            # Remove original file
            remove(SCREAMING_SNAKE_CASE_)
            # Move new file
            move(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)

        def skip_units(UpperCAmelCase_ : Optional[int]):
            # A snippet is skipped when its target framework was not requested.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(UpperCAmelCase_ : Optional[int]):
            # Parse the `to_replace_*.py` marker file and apply each snippet.
            with open(SCREAMING_SNAKE_CASE_) as datafile:
                UpperCamelCase__ : Any = []
                UpperCamelCase__ : Dict = False
                UpperCamelCase__ : Any = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        UpperCamelCase__ : int = line.split('"')[1]
                        UpperCamelCase__ : Tuple = skip_units(SCREAMING_SNAKE_CASE_)
                    elif "# Below: " in line and "##" not in line:
                        UpperCamelCase__ : Union[str, Any] = line.split('"')[1]
                        UpperCamelCase__ : Union[str, Any] = skip_units(SCREAMING_SNAKE_CASE_)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                        UpperCamelCase__ : List[Any] = []
                    elif "# Replace with" in line and "##" not in line:
                        UpperCamelCase__ : List[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(SCREAMING_SNAKE_CASE_)
            remove(SCREAMING_SNAKE_CASE_)

        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(SCREAMING_SNAKE_CASE_)
| 596 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __UpperCamelCase ( emb_a , emb_b , eps=1e-12 ):
    """Pairwise cosine similarity between two batches of embeddings.

    Each row of ``emb_a`` (shape ``(n, d)``) and ``emb_b`` (shape ``(m, d)``)
    is L2-normalised — with the norm clipped to ``eps`` to avoid division by
    zero — and the ``(n, m)`` matrix of inner products is returned.

    Fixes: the mangled signature declared the same parameter name twice
    (a SyntaxError) and the second normalisation re-read the first tensor.
    """
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class _A ( nn.Module ):
    """Flax module scoring CLIP image embeddings against fixed "concept" and
    "special care" embeddings (the Stable Diffusion safety-checker head).

    NOTE(review): identifiers look machine-mangled — the method named ``_a``
    plays the role of ``flax.linen``'s ``setup`` (Flax will never call it
    under this name), the two dataclass fields rebind the same name, and the
    bodies read attributes/locals that are never assigned under those names.
    """

    SCREAMING_SNAKE_CASE_ : CLIPConfig
    SCREAMING_SNAKE_CASE_ : jnp.dtype =jnp.floataa

    def _a (self ) -> int:
        """Build submodules: CLIP vision tower, projection layer, and the
        learned concept / special-care embedding parameters."""
        UpperCamelCase__ = FlaxCLIPVisionModule(self.config.vision_config )
        UpperCamelCase__ = nn.Dense(self.config.projection_dim , use_bias=SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
        UpperCamelCase__ = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        UpperCamelCase__ = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        UpperCamelCase__ = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
        UpperCamelCase__ = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )

    def __call__(self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        """Return one boolean per image: whether it triggers any NSFW concept."""
        UpperCamelCase__ = self.vision_model(SCREAMING_SNAKE_CASE_ )[1]
        UpperCamelCase__ = self.visual_projection(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = jax_cosine_distance(SCREAMING_SNAKE_CASE_ , self.special_care_embeds )
        UpperCamelCase__ = jax_cosine_distance(SCREAMING_SNAKE_CASE_ , self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        UpperCamelCase__ = 0.0
        UpperCamelCase__ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        UpperCamelCase__ = jnp.round(SCREAMING_SNAKE_CASE_ , 3 )
        UpperCamelCase__ = jnp.any(special_scores > 0 , axis=1 , keepdims=SCREAMING_SNAKE_CASE_ )
        # Use a lower threshold if an image has any special care concept
        UpperCamelCase__ = is_special_care * 0.01
        UpperCamelCase__ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        UpperCamelCase__ = jnp.round(SCREAMING_SNAKE_CASE_ , 3 )
        UpperCamelCase__ = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class _A ( __UpperCamelCase ):
    """Pretrained-model wrapper around the Flax safety-checker module.

    NOTE(review): identifiers look machine-mangled — ``__init__`` and ``_a``
    repeat the parameter name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError in
    Python), the three class attributes (presumably ``config_class``,
    ``main_input_name``, ``module_class``) all rebind the same name, and
    ``FlaxStableDiffusionSafetyCheckerModule`` is not defined under that name
    in this file. Restore the original identifiers before use.
    """

    SCREAMING_SNAKE_CASE_ : Optional[Any] =CLIPConfig
    SCREAMING_SNAKE_CASE_ : Dict ="clip_input"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =FlaxStableDiffusionSafetyCheckerModule

    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = jnp.floataa , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
        """Instantiate the module; the input shape defaults to one 224x224 RGB image."""
        if input_shape is None:
            UpperCamelCase__ = (1, 224, 224, 3)
        UpperCamelCase__ = self.module_class(config=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_shape=SCREAMING_SNAKE_CASE_ , seed=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , _do_init=_do_init )

    def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> FrozenDict:
        """Initialise parameters from random pixel values (plays the role of ``init_weights``)."""
        UpperCamelCase__ = jax.random.normal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ , UpperCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = {'''params''': params_rng, '''dropout''': dropout_rng}
        UpperCamelCase__ = self.module.init(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['''params''']
        return random_params

    def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , ) -> str:
        """Run the checker on NCHW pixel values (transposed to NHWC for CLIP)."""
        UpperCamelCase__ = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) )
        return self.module.apply(
            {'''params''': params or self.params} , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) , rngs={} , )
| 415 | 0 |
def UpperCamelCase_ ( s , pattern ) -> list:
    """Naive substring search.

    Returns every index ``i`` such that ``s[i:i + len(pattern)] == pattern``,
    scanning left to right in O(len(s) * len(pattern)) time.

    Fixes: the mangled version declared the same parameter name twice (a
    SyntaxError) and referenced undefined locals (``pat_len``, ``s``,
    ``pattern``, ``match_found``, ``position``); this restores the intent
    pinned by the module self-test below (``"ABCDEFG", "DE" -> [3]``).
    """
    pat_len = len(pattern)
    positions = []
    # Try every alignment of the pattern against the text.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            positions.append(i)
    return positions
if __name__ == "__main__":
    # Script self-check. Fix: the original called `naive_pattern_search`,
    # the pre-mangling name, which no longer exists in this module.
    assert UpperCamelCase_("""ABCDEFG""", """DE""") == [3]
    print(UpperCamelCase_("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 701 |
def UpperCamelCase_ ( __a ) -> int:
    """Return the largest integer obtainable by deleting exactly one digit of ``__a``.

    The sign is discarded (``abs``) before the digits are considered.

    Raises:
        TypeError: if ``__a`` is not an ``int``.

    Note: a single-digit input leaves an empty string, so ``int("")`` raises
    ``ValueError`` — same limitation as the original.

    Fixes: the mangled version built the candidate lists and the final
    generator from undefined/incorrect names, so no digit was ever removed.
    """
    if not isinstance(__a , int ):
        raise TypeError("only integers accepted as input" )
    num_string = str(abs(__a ) )
    # One candidate per digit position, each copy missing the digit at that position.
    num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
    for index in range(len(num_string ) ):
        num_transpositions[index].pop(index )
    return max(
        int("".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    __import__("""doctest""").testmod()
| 151 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __snake_case , unittest.TestCase ):
    """Unit tests for the CodeGen tokenizer (slow and fast variants).

    NOTE(review): identifiers look machine-mangled — the class-level
    attributes all rebind the same name ``snake_case`` (only the last
    assignment survives; presumably ``tokenizer_class``,
    ``rust_tokenizer_class``, ``test_rust_tokenizer``,
    ``from_pretrained_kwargs`` …), and several ``def`` signatures repeat a
    parameter name, which is a SyntaxError in Python.
    """

    snake_case = CodeGenTokenizer
    snake_case = CodeGenTokenizerFast
    snake_case = True
    snake_case = {'add_prefix_space': True}
    snake_case = False

    def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
        """Write a toy BPE vocab/merges pair into the test temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        lowerCamelCase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        lowerCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        lowerCamelCase = {'unk_token': '<unk>'}
        lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__lowerCamelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__lowerCamelCase ) )

    def lowerCamelCase__ ( self : Tuple , **__snake_case : List[str] ) -> Optional[int]:
        """Build a slow tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def lowerCamelCase__ ( self : List[str] , **__snake_case : Optional[int] ) -> str:
        """Build a fast tokenizer from the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def lowerCamelCase__ ( self : Optional[Any] , __snake_case : List[str] ) -> Optional[int]:
        """Return an (input, expected-output) text pair for round-trip tests."""
        lowerCamelCase = 'lower newer'
        lowerCamelCase = 'lower newer'
        return input_text, output_text

    def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
        """Tokenisation and token->id conversion against the toy vocab."""
        lowerCamelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCamelCase = 'lower newer'
        lowerCamelCase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        lowerCamelCase = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        lowerCamelCase = tokens + [tokenizer.unk_token]
        lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )

    def lowerCamelCase__ ( self : Tuple ) -> List[str]:
        """Slow and fast tokenizers must agree on tokens, ids and unknowns."""
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase = self.get_tokenizer()
        lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase )
        lowerCamelCase = 'lower newer'
        # Testing tokenization
        lowerCamelCase = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        lowerCamelCase = rust_tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        # Testing conversion to ids without special tokens
        lowerCamelCase = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        lowerCamelCase = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        # Testing conversion to ids with special tokens
        lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase )
        lowerCamelCase = tokenizer.encode(__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        lowerCamelCase = rust_tokenizer.encode(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        # Testing the unknown token
        lowerCamelCase = tokens + [rust_tokenizer.unk_token]
        lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )

    def lowerCamelCase__ ( self : Tuple , *__snake_case : Dict , **__snake_case : Optional[Any] ) -> Optional[int]:
        # Intentionally disabled in this suite (inherited pretokenized-input test).
        pass

    def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Any=15 ) -> str:
        """Padding without a pad token must raise for every input shape."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
                # Simple input
                lowerCamelCase = 'This is a simple input'
                lowerCamelCase = ['This is a simple input 1', 'This is a simple input 2']
                lowerCamelCase = ('This is a simple input', 'This is a pair')
                lowerCamelCase = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' )
                # Simple input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' )
                # Simple input
                self.assertRaises(
                    __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' , )
                # Pair input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' )
                # Pair input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' )
                # Pair input
                self.assertRaises(
                    __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='max_length' , )

    def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
        """max_length padding and automatic (longest) padding behaviour."""
        lowerCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
        # Simple input
        lowerCamelCase = 'This is a simple input'
        lowerCamelCase = ['This is a simple input looooooooong', 'This is a simple input']
        lowerCamelCase = ('This is a simple input', 'This is a pair')
        lowerCamelCase = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        lowerCamelCase = tokenizer.pad_token_id
        lowerCamelCase = tokenizer(__lowerCamelCase , padding='max_length' , max_length=30 , return_tensors='np' )
        lowerCamelCase = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncate=__lowerCamelCase , return_tensors='np' )
        lowerCamelCase = tokenizer(*__lowerCamelCase , padding='max_length' , max_length=60 , return_tensors='np' )
        lowerCamelCase = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncate=__lowerCamelCase , return_tensors='np' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['input_ids'] )
        self.assertTrue(0 in out_s['attention_mask'] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
        self.assertFalse(0 in out_sa['attention_mask'][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
        self.assertTrue(0 in out_sa['attention_mask'][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['input_ids'] )
        self.assertTrue(0 in out_p['attention_mask'] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
        self.assertFalse(0 in out_pa['attention_mask'][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
        self.assertTrue(0 in out_pa['attention_mask'][1] )

    def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
        """A custom bos_token must be prepended on encode and survive decode."""
        lowerCamelCase = '$$$'
        lowerCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCamelCase , add_bos_token=__lowerCamelCase )
        lowerCamelCase = 'This is a simple input'
        lowerCamelCase = ['This is a simple input 1', 'This is a simple input 2']
        lowerCamelCase = tokenizer.bos_token_id
        lowerCamelCase = tokenizer(__lowerCamelCase )
        lowerCamelCase = tokenizer(__lowerCamelCase )
        self.assertEqual(out_s.input_ids[0] , __lowerCamelCase )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowerCamelCase = tokenizer.decode(out_s.input_ids )
        lowerCamelCase = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , __lowerCamelCase )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    @slow
    def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
        """`truncate_before_pattern` must cut generated code at the given regexes."""
        lowerCamelCase = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
        lowerCamelCase = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
        lowerCamelCase = '\nif len_a > len_b: result = a\nelse: result = b'
        lowerCamelCase = tokenizer.encode(__lowerCamelCase )
        lowerCamelCase = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^\"\"\"', '\n\n\n']
        lowerCamelCase = tokenizer.decode(__lowerCamelCase , truncate_before_pattern=__lowerCamelCase )
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def lowerCamelCase__ ( self : int ) -> int:
        # Intentionally disabled in this suite.
        pass
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the GIT model. The original assigned the
# structure dict and the torch-only symbol list to throwaway names, then
# referenced the never-defined `_import_structure` at module registration
# time (NameError) -- restored to the standard transformers lazy-module form.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: register the modeling symbols for lazy import.
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter ``p <= max_perimeter``, the number of
    right triangles with integer sides and perimeter ``p``.

    The original definition took a mangled parameter name while the body read
    the undefined names ``max_perimeter`` and ``snake_case_`` (NameError);
    restored to a consistent signature.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        # Start at `base` so each (base, perpendicular) pair is counted once.
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter ``p <= max_perimeter`` with the most integer
    right-triangle solutions (Project Euler problem 39).

    The original was defined under the same mangled name as the helper above,
    shadowing it, and called the then-undefined ``pythagorean_triple``.
    """
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
| 703 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """Integration tests comparing XLM-R hidden states against fairseq reference values.

    The original class reassigned every value to one mangled local and then
    fed undefined placeholders to the model call and assertions; restored
    with the intended variable names.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 556 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line options for the TFRecord sharding script.

    The original bound every value (including argparse ``type=``) to mangled
    placeholders and returned the undefined name ``args``; restored.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a `datasets.map`-compatible callable that tokenizes the "text" column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize each (input_ids, attention_mask) row into a `tf.train.Example` byte string.

    The original referenced the non-existent ``tf.train.IntaaList`` /
    ``intaa_list`` -- restored to ``Int64List`` / ``int64_list``.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        records.append(example.SerializeToString())
    return records


def main(args):
    """Tokenize the dataset, group into fixed-length chunks and write TFRecord shards."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for serialized_example in serialized_examples:
                out_file.write(serialized_example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 104 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their HF conformer counterparts.
# A "*" is replaced by the encoder layer index at load time. The original
# assigned this dict (and the list below) to throwaway mangled names while the
# loader read the undefined names MAPPING / TOP_LEVEL_KEYS -- restored.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (not under the
# "wav2vec2_conformer." submodule prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the parameter of *hf_pointer* addressed by dotted *key*.

    ``weight_type`` selects which sub-attribute (weight/bias/...) of the
    resolved module receives the tensor; ``None`` assigns to the pointer itself.
    The original declared five parameters all named ``A`` (a SyntaxError) and
    assigned every result to one mangled local -- restored.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq state-dict tensor into the matching HF module.

    Conv feature-extractor weights are routed through ``load_conv_layer``;
    everything else is matched against MAPPING. Unmatched weights are
    collected and reported at the end.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE(review): the mangled original accessed `hf_model.wavaveca_conformer`,
    # which contradicts the "wav2vec2_conformer." prefix string used for the
    # mapped keys below; the attribute must match that prefix.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route one ``conv_layers.*`` fairseq tensor into the HF feature extractor.

    ``type_id`` 0 addresses the conv weight/bias; ``type_id`` 2 addresses the
    layer norm (only present on every layer without group norm, or layer 0
    with group norm). Anything else is recorded as unused.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    For fine-tuned (CTC) checkpoints a tokenizer/processor is built from the
    fairseq dictionary and saved alongside the model.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 372 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Return the leftmost index where *item* can be inserted keeping order.

    A negative *hi* means "end of the collection". (All seven functions in
    this module shared one mangled name, shadowing each other -- restored.)
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Return the rightmost index where *item* can be inserted keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Insert *item* after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    """Return an index of *item* in *sorted_collection*, or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    """Same as binary_search but delegating the search to the bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of binary_search over [left, right]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    # The original assigned each value to a throwaway name and then read the
    # intended names (``user_input`` etc.), which were undefined -- restored.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 709 |
"""simple docstring"""
def is_arithmetic_series(series):
    """Return True if *series* (a non-empty list of numbers) has a constant
    difference between consecutive elements.

    The original ran ``isinstance(series, series)`` -- comparing the value
    against itself raises TypeError instead of validating the type -- and
    shared its mangled name with the function below, shadowing it.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # sum() replaces the manual accumulation loop of the original.
    return sum(series) / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 272 | 0 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    """Tests for the AutoConfig factory: shortcut loading, registration,
    error messages and trust_remote_code handling.

    The original class used undefined ``snake_case__`` placeholders for
    every argument and exception type; restored from the evident intent.
    """

    def setUp(self):
        # Disable the interactive remote-code confirmation timeout so the
        # dynamic-config tests below fail fast instead of waiting on input.
        # NOTE(review): the original assigned a bare ``0`` to a mangled name;
        # this target is reconstructed -- confirm against upstream.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Always unregister so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # Without trust_remote_code, loading a remote-code config must fail.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 673 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _lowerCAmelCase ( *lowerCAmelCase ):
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = list(lowerCAmelCase )
for i in range(len(lowerCAmelCase ) ):
UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def _lowerCAmelCase(function=None, starting_batch_size=128):
    """Decorator that retries ``function`` with halved batch sizes on OOM.

    ``function`` must take the batch size as its first argument; the decorator
    injects it, starting at ``starting_batch_size`` and dividing by two each
    time an out-of-memory error is detected.

    Raises:
        RuntimeError: if the batch size reaches zero without success.
        TypeError: if the caller passes the batch size explicitly.
    """
    if function is None:
        # Support ``@find_executable_batch_size(starting_batch_size=...)`` usage.
        return functools.partial(_lowerCAmelCase, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the batch size is injected automatically,
        # so the caller must pass one argument fewer than the signature takes.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    # OOM: free what we can and retry with half the batch size.
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator


# Conventional public name for this decorator.
find_executable_batch_size = _lowerCAmelCase
| 673 | 1 |
'''simple docstring'''
def lowerCamelCase(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    The sequence starts at n = 0, so the result begins with 0.

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    # Check the type first so non-int inputs raise ValueError, not TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


# Descriptive public name used by the demo below.
hexagonal_numbers = lowerCamelCase


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 701 | '''simple docstring'''
import argparse
# Location of the docs' version-switcher script.
_lowercase : str = "docs/source/_static/js/custom.js"


def lowerCamelCase(version, js_file=None):
    """Update the version table in the docs' custom.js file.

    Rewrites the ``stableVersion`` constant to ``v{version}`` and appends a
    ``"v{version}": "v{version}"`` entry at the end of the ``versionMapping``
    table.

    Args:
        version: release version string, without the leading ``v``.
        js_file: path of the custom.js file to rewrite; defaults to the
            repository's ``docs/source/_static/js/custom.js``.
    """
    if js_file is None:
        js_file = _lowercase
    with open(js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the closing brace of the mapping
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version just before the closing brace
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


# Descriptive public name used by the CLI entry point below.
update_custom_js = lowerCamelCase


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 30 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Univariate time-series forecasting with a stacked LSTM.
    # Expects a CSV whose second column holds the target series.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # timesteps fed to the network per sample
    forward_days = 5  # timesteps predicted per sample
    periods = 20  # number of look_back windows reserved for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    # Build sliding windows: look_back inputs -> forward_days targets.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 120 |
'''simple docstring'''
def UpperCAmelCase_(number, number_of_terms):
    """Return ``number``'s multiplication table up to ``number_of_terms``
    terms, one ``a * b = c`` line per term, joined by newlines.
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


# Descriptive public name used by the demo below.
multiplication_table = UpperCAmelCase_


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 120 | 1 |
import string
def a_(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher, one character at a time.

    A<->Z, B<->Y, ... for both cases; non-letters pass through unchanged.
    """
    output = ""
    for ch in sequence:
        code = ord(ch)
        if 65 <= code <= 90:  # uppercase A-Z: 155 - code mirrors the range
            output += chr(155 - code)
        elif 97 <= code <= 122:  # lowercase a-z: 219 - code mirrors the range
            output += chr(219 - code)
        else:
            output += ch
    return output


# Descriptive public name; the benchmark below expects it.
atbash_slow = a_
def a_(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher via a reversed-alphabet
    lookup table; non-letters pass through unchanged.
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


# Descriptive public name; the benchmark below expects it.
atbash = a_
# Benchmark comparing the two Atbash implementations with timeit.
# NOTE(review): the f-strings reference `lowerCAmelCase_`, and the entry point
# below calls `atbash(...)` / `benchmark()` — none of these names are defined
# in this module as written, so this block raises NameError. Confirm the
# intended names (the timeit setup string suggests `atbash` / `atbash_slow`).
def a_ ( ):
    from timeit import timeit
    print('Running performance benchmarks...' )
    __lowerCAmelCase = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(F"""> atbash_slow(): {timeit("atbash_slow(printable)", setup=lowerCAmelCase_ )} seconds""" )
    print(F"""> atbash(): {timeit("atbash(printable)", setup=lowerCAmelCase_ )} seconds""" )
if __name__ == "__main__":
    # Demo a few encodings, then run the benchmark above.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F"""{example} encrypted in atbash: {atbash(example)}""")
    benchmark()
| 421 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def a_():
    """Parse the command-line arguments for the complexity fine-tuning run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


# Conventional name; main() below calls get_args().
get_args = a_
# Module-level accuracy metric from the `evaluate` library, shared by the
# Trainer's compute_metrics callback below.
_snake_case : Union[str, Any] = load('accuracy')
def a_(eval_pred):
    """Accuracy metric callback for the Trainer.

    Args:
        eval_pred: an ``EvalPrediction``-style pair of (logits, labels).
    """
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    # `_snake_case` is the module-level `evaluate` accuracy metric loaded above.
    return _snake_case.compute(predictions=predictions, references=labels)


# Conventional name; main() passes `compute_metrics` to the Trainer.
compute_metrics = a_
class _UpperCAmelCase ( _UpperCamelCase ):
    """Trainer callback that re-evaluates on the *training* set each epoch.

    Metrics are reported with the ``train_`` prefix so train/validation
    accuracy can be compared side by side.
    """

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        """Standard ``TrainerCallback`` hook, fired once per training epoch."""
        if control.should_evaluate:
            # Copy first: the nested evaluate() call mutates the control object.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
            return control_copy

    # Backward-compatible binding for the previous method name.
    lowercase = on_epoch_end


# Conventional name used when the callback is registered in main().
CustomCallback = _UpperCAmelCase
def a_():
    """Fine-tune a code-complexity classifier on ``codeparrot/codecomplex``.

    Loads the dataset, splits it 80/10/10 (train/test/valid), tokenizes the
    source snippets, and trains an ``AutoModelForSequenceClassification``
    with 7 complexity labels.
    """
    args = get_args()
    set_seed(args.seed)
    # 80% train; the remainder is split evenly into test / validation.
    raw_dataset = load_dataset('codeparrot/codecomplex', split='train')
    train_test = raw_dataset.train_test_split(test_size=0.2)
    test_validation = train_test['test'].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        }
    )
    print('Loading tokenizer and model')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    # GPT-style checkpoints ship no pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        # Freeze the encoder and train only the classification head.
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'])))

    def tokenize(example):
        """Tokenize a batch of source snippets and encode their labels."""
        inputs = tokenizer(example['src'], truncation=True, max_length=1024)
        label = labels.str2int(example['complexity'])  # was `labels.straint` (typo)
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation['train'].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb', )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print('Training...')
    # Also log training-set accuracy after each epoch (see CustomCallback).
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


# Conventional entry-point name.
main = a_


if __name__ == "__main__":
    main()
| 421 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A ( unittest.TestCase ):
    """Processor round-trip tests for InstructBlip (tokenizer + image
    processor + Q-Former tokenizer).

    NOTE(review): every helper/test below is named ``lowerCAmelCase__``, so
    each definition shadows the previous one and only the last survives on the
    class; calls such as ``self.get_tokenizer()`` are likewise undefined under
    these scrambled names. Confirm the intended method names (setUp,
    get_tokenizer, get_image_processor, get_qformer_tokenizer, tearDown, ...).
    """
    def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
        '''Create a temp dir holding a saved InstructBlipProcessor fixture.'''
        UpperCAmelCase_ =tempfile.mkdtemp()
        UpperCAmelCase_ =BlipImageProcessor()
        UpperCAmelCase_ =GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        UpperCAmelCase_ =BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
        UpperCAmelCase_ =InstructBlipProcessor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__ ( self: str , **_lowerCAmelCase: str ) -> int:
        '''Reload the saved processor and return its main tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer
    def lowerCAmelCase__ ( self: Tuple , **_lowerCAmelCase: Any ) -> Tuple:
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor
    def lowerCAmelCase__ ( self: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
        '''Reload the saved processor and return its Q-Former tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).qformer_tokenizer
    def lowerCAmelCase__ ( self: List[str] ) -> Union[str, Any]:
        '''Remove the temp fixture dir.'''
        shutil.rmtree(self.tmpdirname )
    def lowerCAmelCase__ ( self: str ) -> List[Any]:
        '''Build a single random RGB PIL image as test input.'''
        UpperCAmelCase_ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase_ =[Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
        '''save_pretrained/from_pretrained round trip with overridden kwargs.'''
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        UpperCAmelCase_ =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
        UpperCAmelCase_ =InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
        self.assertIsInstance(processor.qformer_tokenizer , _lowerCAmelCase )
    def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
        '''Processor image path matches using the image processor directly.'''
        UpperCAmelCase_ =self.get_image_processor()
        UpperCAmelCase_ =self.get_tokenizer()
        UpperCAmelCase_ =self.get_qformer_tokenizer()
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
        UpperCAmelCase_ =self.prepare_image_inputs()
        UpperCAmelCase_ =image_processor(_lowerCAmelCase , return_tensors="np" )
        UpperCAmelCase_ =processor(images=_lowerCAmelCase , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
        '''Processor text path matches both underlying tokenizers.'''
        UpperCAmelCase_ =self.get_image_processor()
        UpperCAmelCase_ =self.get_tokenizer()
        UpperCAmelCase_ =self.get_qformer_tokenizer()
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
        UpperCAmelCase_ ="lower newer"
        UpperCAmelCase_ =processor(text=_lowerCAmelCase )
        UpperCAmelCase_ =tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
        UpperCAmelCase_ =qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
    def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
        '''Text+image call yields the expected keys; empty call raises.'''
        UpperCAmelCase_ =self.get_image_processor()
        UpperCAmelCase_ =self.get_tokenizer()
        UpperCAmelCase_ =self.get_qformer_tokenizer()
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
        UpperCAmelCase_ ="lower newer"
        UpperCAmelCase_ =self.prepare_image_inputs()
        UpperCAmelCase_ =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
        # test if it raises when no input is passed
        with pytest.raises(_lowerCAmelCase ):
            processor()
    def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
        '''batch_decode delegates to the main tokenizer.'''
        UpperCAmelCase_ =self.get_image_processor()
        UpperCAmelCase_ =self.get_tokenizer()
        UpperCAmelCase_ =self.get_qformer_tokenizer()
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
        UpperCAmelCase_ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCAmelCase_ =processor.batch_decode(_lowerCAmelCase )
        UpperCAmelCase_ =tokenizer.batch_decode(_lowerCAmelCase )
        self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
    def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
        '''model_input_names: text+image call exposes the expected keys.'''
        UpperCAmelCase_ =self.get_image_processor()
        UpperCAmelCase_ =self.get_tokenizer()
        UpperCAmelCase_ =self.get_qformer_tokenizer()
        UpperCAmelCase_ =InstructBlipProcessor(
            tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
        UpperCAmelCase_ ="lower newer"
        UpperCAmelCase_ =self.prepare_image_inputs()
        UpperCAmelCase_ =processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 54 |
"""simple docstring"""
import os
from math import log10


def _UpperCamelCase(_A: str = "base_exp.txt") -> int:
    """Project Euler 99: find the line whose ``base,exponent`` pair has the
    greatest value of base**exponent.

    Compares ``exponent * log10(base)`` instead of the (astronomically large)
    powers themselves.

    Args:
        _A: path of the data file, resolved relative to its own directory.

    Returns:
        int: 1-based line number of the largest pair.
    """
    largest = 0.0
    result = 0
    file_path = os.path.join(os.path.dirname(_A), _A)
    with open(file_path) as fh:
        for i, line in enumerate(fh):
            base, exponent = map(int, line.split(","))
            if exponent * log10(base) > largest:
                largest = exponent * log10(base)
                result = i + 1
    return result


# Conventional public name used by the entry point below.
solution = _UpperCamelCase


if __name__ == "__main__":
    print(solution())
def A(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Search ``key`` in ``list_data`` by probing both ends and recursing
    inward one position per step.

    Args:
        list_data: the list to search.
        key: value to find.
        left: left probe index (default 0).
        right: right probe index; defaults to the last index on first call.

    Returns:
        int: index of ``key`` (left match preferred), or -1 if absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


# The recursive call above uses the conventional name; bind it here.
search = A


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A(config_path, display=False):
    """Load a YAML config with OmegaConf.

    Args:
        config_path: path of the YAML file.
        display: when True, also pretty-print the parsed config.

    Returns:
        the OmegaConf config object.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


# Conventional name; the VQGAN loader below calls it.
load_config = A
def A(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load weights from a checkpoint.

    Args:
        device: torch device to move the model to (also used as map_location).
        conf_path: YAML config path; defaults to the bundled vqgan_only.yaml.
        ckpt_path: checkpoint path; defaults to the bundled vqgan_only.pt.

    Returns:
        the loaded VQModel in its default mode, on ``device``.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    # NOTE(review): strict load assumed — relax if checkpoint keys differ.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


# Conventional name for this loader.
load_vqgan = A
def A(x, model):
    """Encode ``x`` with the VQGAN-style ``model`` and decode the latent back.

    Prints the latent's spatial shape for debugging and returns the
    reconstruction.
    """
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def A(string, reload=False):
    """Resolve a dotted path like ``package.module.Name`` to the object.

    Args:
        string: fully-qualified dotted name; everything before the last dot is
            treated as the module path.
        reload: when True, re-import the module before resolving (useful after
            editing source files in a live session).
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


# Conventional name; the instantiation helper below calls it.
get_obj_from_str = A
def A(config):
    """Instantiate the class named by ``config["target"]`` with
    ``config["params"]`` as keyword arguments.

    Raises:
        KeyError: if ``config`` has no ``"target"`` entry.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


# Conventional name; the model loaders below call it.
instantiate_from_config = A
def A(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from ``config`` and optionally load a state dict.

    Args:
        config: an object/dict with ``target`` (and optional ``params``).
        sd: state dict to load, or None to keep fresh weights.
        gpu: move the model to CUDA when True.
        eval_mode: switch the model to eval() when True.

    Returns:
        dict: ``{"model": model}``.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


# Conventional name; the checkpoint loader below calls it.
load_model_from_config = A
def A(config, ckpt, gpu, eval_mode):
    """Load a model (and its Lightning global step) from a checkpoint path.

    Args:
        config: config object whose ``model`` section describes the network.
        ckpt: checkpoint path, or a falsy value for fresh weights.
        gpu: forwarded to load_model_from_config.
        eval_mode: forwarded to load_model_from_config.

    Returns:
        tuple: (model, global_step or None).
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 658 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( __snake_case , unittest.TestCase ):
    """Fast pipeline tests for ShapEImgaImgPipeline with tiny dummy components.

    NOTE(review): every method below is named ``__UpperCamelCase``, so each
    definition shadows the previous one and only the last survives on the
    class; the class-level attributes are likewise all bound to
    ``_UpperCAmelCase``. The inputs helper additionally declares two
    parameters with the same name (``A_``), which is a SyntaxError as
    written. Confirm the intended names (pipeline_class, params,
    get_dummy_components, get_dummy_inputs, ...) before shipping.
    """
    _UpperCAmelCase :str = ShapEImgaImgPipeline
    _UpperCAmelCase :int = ['image']
    _UpperCAmelCase :Optional[Any] = ['image']
    _UpperCAmelCase :Optional[Any] = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :Optional[Any] = False
    @property
    def __UpperCamelCase( self ):
        '''Hidden size of the dummy text/image embedder.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Time-input dimension of the dummy prior.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Time-embedding dimension (4x the time-input dimension).'''
        return self.time_input_dim * 4
    @property
    def __UpperCamelCase( self ):
        '''Hidden size of the dummy renderer.'''
        return 8
    @property
    def __UpperCamelCase( self ):
        '''Build a tiny, seed-fixed CLIP vision encoder.'''
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        UpperCamelCase : Optional[int] = CLIPVisionModel(A_ )
        return model
    @property
    def __UpperCamelCase( self ):
        '''Build the matching CLIP image processor.'''
        UpperCamelCase : List[str] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor
    @property
    def __UpperCamelCase( self ):
        '''Build a tiny, seed-fixed PriorTransformer.'''
        torch.manual_seed(0 )
        UpperCamelCase : Tuple = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        UpperCamelCase : List[str] = PriorTransformer(**A_ )
        return model
    @property
    def __UpperCamelCase( self ):
        '''Build a tiny, seed-fixed ShapE renderer.'''
        torch.manual_seed(0 )
        UpperCamelCase : str = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        UpperCamelCase : Optional[Any] = ShapERenderer(**A_ )
        return model
    def __UpperCamelCase( self ):
        '''Assemble all dummy components plus a Heun scheduler.'''
        UpperCamelCase : str = self.dummy_prior
        UpperCamelCase : str = self.dummy_image_encoder
        UpperCamelCase : str = self.dummy_image_processor
        UpperCamelCase : List[Any] = self.dummy_renderer
        UpperCamelCase : int = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=A_ , clip_sample=A_ , clip_sample_range=1.0 , )
        UpperCamelCase : Union[str, Any] = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    # NOTE(review): duplicate parameter name `A_` below — SyntaxError as written.
    def __UpperCamelCase( self , A_ , A_=0 ):
        '''Deterministic pipeline inputs for a given device and seed.'''
        UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
        if str(A_ ).startswith("mps" ):
            # mps does not support device-specific generators
            UpperCamelCase : Union[str, Any] = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[str] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : int = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def __UpperCamelCase( self ):
        '''Smoke-test a single CPU inference against a fixed pixel slice.'''
        UpperCamelCase : List[str] = "cpu"
        UpperCamelCase : Dict = self.get_dummy_components()
        UpperCamelCase : int = self.pipeline_class(**A_ )
        UpperCamelCase : Union[str, Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Tuple = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : Union[str, Any] = output.images[0]
        UpperCamelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        UpperCamelCase : Tuple = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase( self ):
        '''Batched calls must be consistent across batch sizes.'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def __UpperCamelCase( self ):
        '''Batch results must match single-input results (relaxed on CPU).'''
        UpperCamelCase : Any = torch_device == "cpu"
        UpperCamelCase : Optional[int] = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=A_ , relax_max_difference=A_ , )
    def __UpperCamelCase( self ):
        '''num_images_per_prompt must expand the output batch accordingly.'''
        UpperCamelCase : Optional[Any] = self.get_dummy_components()
        UpperCamelCase : str = self.pipeline_class(**A_ )
        UpperCamelCase : List[Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Any = 1
        UpperCamelCase : int = 2
        UpperCamelCase : str = self.get_dummy_inputs(A_ )
        for key in inputs.keys():
            if key in self.batch_params:
                UpperCamelCase : Tuple = batch_size * [inputs[key]]
        UpperCamelCase : Optional[int] = pipe(**A_ , num_images_per_prompt=A_ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow integration test for the pretrained openai/shap-e-img2img pipeline.

    NOTE(review): both methods below share the name ``__UpperCamelCase``, so
    the teardown helper (first def) is shadowed by the test (second def);
    confirm the intended names (tearDown / test_shap_e_img2img).
    """
    def __UpperCamelCase( self ):
        '''Free GPU memory after each test.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase( self ):
        '''Full img2img run compared against a stored reference rendering.'''
        UpperCamelCase : List[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        UpperCamelCase : Optional[int] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        UpperCamelCase : List[str] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
        UpperCamelCase : Any = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Any = torch.Generator(device=A_ ).manual_seed(0 )
        UpperCamelCase : Dict = pipe(
            A_ , generator=A_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 629 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A__ :
    """Save/load round-trip test mixin for feature extractors.

    Subclasses are expected to provide ``feature_extraction_class`` and
    ``feat_extract_dict``. NOTE(review): the attribute declared below is the
    scrambled name ``_UpperCAmelCase`` while the methods read
    ``self.feature_extraction_class``; several locals (``feat_extract``,
    ``obj``, ``A_``) are likewise undefined as written. Confirm the intended
    names before shipping.
    """
    _UpperCAmelCase :Union[str, Any] = None
    def __UpperCamelCase( self ):
        '''to_json_string must round-trip every configured value.'''
        UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Union[str, Any] = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , A_ )
    def __UpperCamelCase( self ):
        '''to_json_file / from_json_file must round-trip the config.'''
        UpperCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase : List[Any] = os.path.join(A_ , "feat_extract.json" )
            feat_extract_first.to_json_file(A_ )
            UpperCamelCase : str = self.feature_extraction_class.from_json_file(A_ )
            self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def __UpperCamelCase( self ):
        '''save_pretrained / from_pretrained must round-trip the config.'''
        UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase : int = feat_extract_first.save_pretrained(A_ )[0]
            check_json_file_has_correct_format(A_ )
            UpperCamelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(A_ )
            self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def __UpperCamelCase( self ):
        '''A default-constructed feature extractor must not be None.'''
        UpperCamelCase : Any = self.feature_extraction_class()
        self.assertIsNotNone(A_ )
| 629 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): the four assignments below repeatedly rebind one throwaway
# module-level name, so the pickling compatibility shim has no effect as
# written. Upstream this aliased `data_utils.Vocab` / `data_utils.Corpus` and
# registered the module under `sys.modules["data_utils"]` /
# `sys.modules["vocabulary"]` — confirm and restore before relying on it.
lowerCamelCase :Tuple = data_utils.TransfoXLTokenizer
lowerCamelCase :Tuple = data_utils.TransfoXLCorpus
lowerCamelCase :List[str] = data_utils
lowerCamelCase :str = data_utils
def __snake_case(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file) -> None:
    """Convert a TensorFlow Transfo-XL checkpoint and/or a pre-processed
    corpus pickle to PyTorch files under ``pytorch_dump_folder_path``.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert ("" to skip).
        transfo_xl_config_file: optional model config JSON ("" for defaults).
        pytorch_dump_folder_path: output folder for the converted artifacts.
        transfo_xl_dataset_file: optional corpus pickle to convert ("" to skip).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better
        # than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


# Conventional name used by the CLI entry point below.
convert_transfo_xl_checkpoint_to_pytorch = __snake_case
if __name__ == "__main__":
    # CLI wrapper around convert_transfo_xl_checkpoint_to_pytorch.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 346 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class UpperCAmelCase:
        """Fallback stub used when vision dependencies (PIL) are unavailable."""

        @staticmethod
        def _A(*args, **kwargs):
            # Bugfix: the original annotated *args and **kwargs with the same
            # placeholder name, which is a SyntaxError (duplicate argument).
            # The stub intentionally does nothing.
            pass
def __snake_case(_UpperCamelCase) -> None:
    """Stub hook that ignores its argument and always returns ``None``.

    Bugfix: the original ``-> Dict`` annotation referenced ``Dict`` which is not
    imported in this module (annotations are evaluated at definition time, so
    loading the module would raise NameError); the function returns None anyway.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# Bugfix: the test class below reads the module-level name INVOICE_URL, but the
# value was bound to a placeholder name, raising NameError.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase(unittest.TestCase):
    """Pipeline tests for the ``document-question-answering`` task.

    Bugfixes relative to the original: every method was named ``_A`` (so later
    definitions silently overwrote earlier ones), several signatures repeated a
    single placeholder parameter name (a SyntaxError), and all locals were
    bound to ``_a`` while the code referenced the real names (NameError).
    Method and variable names are restored from the names the code references.
    """

    # Class attribute read by the pipeline test mixin machinery.
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a document-QA pipeline plus a small set of example inputs."""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        # NOTE(review): the original's second apply_tesseract argument was an
        # ambiguous placeholder; None (no language hint) matches upstream usage.
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """Run the pipeline on the examples and check the output structure."""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 346 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowercase(BertTokenizerFast):
    """Fast tokenizer whose slow counterpart is the project's ``CustomTokenizer``.

    Bugfix: the original base class name was an undefined placeholder
    (NameError at class creation). ``BertTokenizerFast`` is the only imported
    candidate base in this module — NOTE(review): confirm against upstream.
    """

    # Attribute read by the fast-tokenizer machinery to locate the slow class.
    slow_tokenizer_class = CustomTokenizer
| 35 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Bugfix: the tokenizer class below calls `logger.error` / `logger.warning`,
# but the logger was bound to a placeholder name.
logger = logging.get_logger(__name__)
# Bugfix: the tokenizer class references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but
# all three literals were bound to the same placeholder name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

# Maximum input lengths (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def __SCREAMING_SNAKE_CASE():
    """Return the GPT-2 style mapping from byte values (0-255) to printable unicode chars.

    Printable bytes map to themselves; the remaining bytes are shifted up past
    0xFF so every byte has a visible, non-whitespace representative. Cached
    since the table is a pure constant.

    Bugfix: all locals were bound to a placeholder name while the code
    referenced ``bs``/``cs``/``n`` (NameError), and the final comprehension
    called ``chr`` on a stray placeholder instead of the loop variable.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


# Consistency fix: the tokenizer class calls `bytes_to_unicode()`, which was
# otherwise undefined in this module.
bytes_to_unicode = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(word):
    """Return the set of adjacent symbol pairs in *word* (a string or tuple of symbols).

    Bugfix: the body referenced ``word``/``pairs``/``prev_char`` but the
    parameter and locals were bound to placeholder names (NameError).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Consistency fix: the tokenizer's BPE routine calls `get_pairs(...)`, which
# was otherwise undefined in this module.
get_pairs = __SCREAMING_SNAKE_CASE
class _UpperCAmelCase(PreTrainedTokenizer):
    """Byte-level BPE tokenizer (GPT-2 style) used by BART.

    Bugfixes relative to the original: the base class was an undefined
    placeholder (``PreTrainedTokenizer`` is the imported candidate — confirm
    against upstream), every method was named ``lowerCAmelCase__`` (so later
    definitions overwrote earlier ones), signatures repeated the parameter
    name ``a`` (SyntaxError), and every attribute/local assignment was bound
    to ``lowercase_`` while the code referenced the real names (NameError).
    Names are restored from the names the code itself references.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Number of tokens in the base vocabulary (added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base plus added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unknown token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level representation back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Bugfix: the sort key lambda referenced `kv` but named its
            # parameter with a placeholder.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BART special tokens: <s> X </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-merged like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 620 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCAmelCase_:
    """Helper that builds small DPT configs/inputs for the model tests below.

    Bugfixes relative to the original: every ``self.*`` attribute assignment
    was bound to a placeholder local (so the attributes read later were never
    set), and all methods were named ``__lowercase`` although callers use the
    conventional tester method names (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        # NOTE: mutable list defaults kept to mirror the original defaults.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one small forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny hybrid DPTConfig with a BiT-style backbone config."""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        self.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Consistency fix: the test class below instantiates `DPTModelTester(self)`,
# which was otherwise undefined in this module.
DPTModelTester = lowerCAmelCase_
@require_torch
class lowerCAmelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the hybrid DPT architecture.

    Bugfixes relative to the original: the class listed the same placeholder
    base twice (``class C(_lowercase, _lowercase, ...)`` raises TypeError:
    duplicate base class); the mixin bases used here are the two imported test
    mixins. Class attributes and locals were bound to placeholder names while
    the code referenced the real ones, and all methods shared one name.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    # NOTE(review): the original method name was lost to obfuscation; this
    # skip message is used upstream for `test_model_is_small` — confirm.
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def _a():
    """Load the COCO cats fixture image used by the integration test below.

    Bugfix: the loaded image was bound to a placeholder name while the return
    statement referenced ``image`` (NameError). The bogus ``-> Tuple``
    annotation is dropped — the function returns a PIL image.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Consistency fix: the integration test calls `prepare_img()`, which was
# otherwise undefined in this module.
prepare_img = _a
@require_torch
@require_vision
@slow
class lowerCAmelCase_(unittest.TestCase):
    """Slow integration test for the pretrained hybrid DPT depth-estimation model.

    Bugfix: all locals were bound to a placeholder name while the assertions
    referenced the real names (NameError); restored from those references.
    """

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 567 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)

# fairseq -> HF parameter-name fragment mapping; '*' stands for the encoder
# layer index and is substituted during conversion.
# NOTE(review): successive module constants all rebind `_snake_case`; the
# loader functions below read them as `MAPPING` / the top-level-keys list.
_snake_case = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Mapped keys that presumably sit at the top level of the HF model
# (heads / quantizer weights) — TODO confirm against the upstream script.
_snake_case = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def _a ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
    """Copy one fairseq tensor `value` into the HF model attribute addressed by dotted `key`.

    NOTE(review): all five parameters share the name `__lowercase` — duplicate
    argument names are a SyntaxError, so this cannot run as written. The
    pre-mangling signature was presumably (hf_pointer, key, value, full_name,
    weight_type), and the `__UpperCamelCase = value` lines below were originally
    `hf_pointer.<weight_type>.data = value` mutations — confirm against the
    upstream conversion script before restoring.
    """
    # Walk the dotted key down to the target sub-module / parameter.
    for attribute in key.split('.' ):
        __UpperCamelCase = getattr(__lowercase , __lowercase )
    # Shape check: compare against the specific weight slot when given.
    if weight_type is not None:
        __UpperCamelCase = getattr(__lowercase , __lowercase ).shape
    else:
        __UpperCamelCase = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    # Dispatch on the weight slot being filled.
    if weight_type == "weight":
        __UpperCamelCase = value
    elif weight_type == "weight_g":
        __UpperCamelCase = value
    elif weight_type == "weight_v":
        __UpperCamelCase = value
    elif weight_type == "bias":
        __UpperCamelCase = value
    else:
        __UpperCamelCase = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _a ( __lowercase , __lowercase ) -> Optional[Any]:
    """Map every tensor of the fairseq WavLM state dict onto the HF model.

    NOTE(review): both parameters are named `__lowercase` (SyntaxError);
    pre-mangling this was (fairseq_model, hf_model). `MAPPING`,
    `load_conv_layer` and `set_recursively` are the original names of the
    mangled module constants/helpers above and are undefined at runtime here.
    """
    unused_weights = []
    __UpperCamelCase = fairseq_model.state_dict()
    __UpperCamelCase = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        __UpperCamelCase = False
        # Conv feature-extractor layers get special positional handling.
        if "conv_layers" in name:
            load_conv_layer(
                __lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == 'group' , )
            __UpperCamelCase = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    __UpperCamelCase = True
                    # Substitute '*' with the encoder layer index extracted from the name.
                    if "*" in mapped_key:
                        __UpperCamelCase = name.split(__lowercase )[0].split('.' )[-2]
                        __UpperCamelCase = mapped_key.replace('*' , __lowercase )
                    if "weight_g" in name:
                        __UpperCamelCase = 'weight_g'
                    elif "weight_v" in name:
                        __UpperCamelCase = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        __UpperCamelCase = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __UpperCamelCase = 'weight'
                    else:
                        __UpperCamelCase = None
                    set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
                continue
        if not is_used:
            unused_weights.append(__lowercase )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def _a ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
    """Load one conv feature-extractor tensor into the HF feature extractor.

    NOTE(review): five parameters share one name (SyntaxError); pre-mangling
    this was (full_name, value, feature_extractor, unused_weights,
    use_group_norm). Locals `items`/`layer_id`/`type_id` are also read unbound
    because the assignments were replaced by the throwaway `__UpperCamelCase`.
    """
    # Name relative to the conv_layers prefix, e.g. '0.0.weight'.
    __UpperCamelCase = full_name.split('conv_layers.' )[-1]
    __UpperCamelCase = name.split('.' )
    __UpperCamelCase = int(items[0] )   # conv-layer index
    __UpperCamelCase = int(items[1] )   # 0 = conv weight/bias, 2 = layer-norm weight/bias
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __UpperCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __UpperCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __UpperCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __UpperCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(__lowercase )
@torch.no_grad()
def _a ( __lowercase , __lowercase , __lowercase=None ) -> Optional[Any]:
    """Convert a fairseq WavLM checkpoint into an HF WavLM checkpoint on disk.

    NOTE(review): the three parameters share one name (SyntaxError);
    pre-mangling this was (checkpoint_path, pytorch_dump_folder_path,
    config_path). `checkpoint`/`model`/`hf_wavlm` are read unbound for the
    same reason.
    """
    __UpperCamelCase = torch.load(__lowercase )
    __UpperCamelCase = WavLMConfigOrig(checkpoint['cfg'] )
    __UpperCamelCase = WavLMOrig(__lowercase )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    # Either load an explicit HF config or fall back to the defaults.
    if config_path is not None:
        __UpperCamelCase = WavLMConfig.from_pretrained(__lowercase )
    else:
        __UpperCamelCase = WavLMConfig()
    __UpperCamelCase = WavLMModel(__lowercase )
    recursively_load_weights(__lowercase , __lowercase )
    hf_wavlm.save_pretrained(__lowercase )
if __name__ == "__main__":
    # CLI entry point: convert a fairseq WavLM checkpoint to HF format.
    # NOTE(review): the parser binding was mangled to `_snake_case`, but the
    # code reads `parser`/`args`; `convert_wavlm_checkpoint` is the original
    # name of the mangled `_a` above.
    _snake_case = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    _snake_case = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 567 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowercase__ : Optional[Any] = logging.get_logger(__name__)
# Formatter registries: class by format type, alias -> canonical type, and
# alias -> error to raise for known-but-uninstalled backends.
# NOTE(review): all three dicts (and the logger) rebind the same name
# `lowercase__`; the functions below read them as `logger`, `_FORMAT_TYPES`,
# `_FORMAT_TYPES_ALIASES` and `_FORMAT_TYPES_ALIASES_UNAVAILABLE`.
lowercase__ : Dict[Optional[str], Type[Formatter]] = {}
lowercase__ : Dict[Optional[str], str] = {}
lowercase__ : Dict[Optional[str], Exception] = {}
def __lowercase ( formatter_cls , format_type , aliases = None , ):
    """Register ``formatter_cls`` for ``format_type`` plus optional ``aliases``.

    Overwriting an existing registration is allowed but logged as a warning.

    Fixed: the original signature declared the same parameter name three times
    (a SyntaxError), and the registry assignments had been replaced by
    throwaway locals so nothing was ever registered. Expects the module-level
    ``_FORMAT_TYPES`` / ``_FORMAT_TYPES_ALIASES`` dicts and ``logger``.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    # The registration itself (was lost in the mangled version).
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def __lowercase ( unavailable_error , format_type , aliases = None ):
    """Register ``unavailable_error`` to be raised when ``format_type`` (or any alias) is requested.

    Fixed: the original signature declared the same parameter name three times
    (a SyntaxError), and the registry assignment targeted a throwaway local.
    Expects the module-level ``_FORMAT_TYPES_ALIASES_UNAVAILABLE`` dict.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
# NOTE(review): `_register_formatter` / `_register_unavailable_formatter` are the
# pre-mangling names of the two `__lowercase` helpers defined above.
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
# Optional backends: register the real formatter when the library is installed,
# otherwise register a descriptive error to be raised on first use.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    lowercase__ : List[str] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
    _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    lowercase__ : Tuple = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
    _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    lowercase__ : Optional[int] = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
    _register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __lowercase ( format_type ):
    """Resolve a format-type alias to its canonical format type (identity for non-aliases).

    Fixed: the parameter had been mangled to ``_a`` while the body read
    ``format_type`` (NameError on every call). Expects the module-level
    ``_FORMAT_TYPES_ALIASES`` dict.
    """
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def __lowercase ( format_type , **format_kwargs ):
    """Instantiate the Formatter registered for ``format_type`` (or one of its aliases).

    Raises the registered unavailability error for known-but-uninstalled
    backends and ValueError for unknown format types.

    Fixed: the original signature ``( _a , **_a )`` declared the same name
    twice, which is a SyntaxError. ``get_format_type_from_alias`` and the
    registry dicts are expected at module scope.
    """
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 123 |
"""simple docstring"""
from math import gcd, isqrt, sqrt
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (
number >= 0
), "'number' must been an int and positive"
snake_case_ : List[str] = True
# 0 and 1 are none primes.
if number <= 1:
snake_case_ : Optional[int] = False
for divisor in range(2 , int(round(sqrt(_a ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
snake_case_ : List[Any] = False
break
# precondition
assert isinstance(_a , _a ), "'status' must been from type bool"
return status
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
snake_case_ : int = list(range(2 , n + 1 ) )
snake_case_ : Optional[int] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_a ) ):
for j in range(i + 1 , len(_a ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
snake_case_ : List[Any] = 0
# filters actual prime numbers.
snake_case_ : str = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_a , _a ), "'ans' must been from type list"
return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n > 2), "'N' must been an int and > 2"
snake_case_ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_a ):
ans.append(_a )
# precondition
assert isinstance(_a , _a ), "'ans' must been from type list"
return ans
def __lowercase ( _a ):
    """Intended: return the prime factors of ``number`` with multiplicity.

    NOTE(review): mangled beyond runnability — the parameter is ``_a`` but the
    body reads ``number``; every assignment binds the throwaway ``snake_case_``
    while ``ans``/``factor``/``quotient`` are read unbound; ``is_prime`` is not
    defined in this module; and ``quotient /= factor`` would make ``quotient``
    a float (should be ``//=`` since divisibility was just checked). Restore
    from the upstream maths module.
    """
    assert isinstance(_a , _a ) and number >= 0, "'number' must been an int and >= 0"
    snake_case_ : Optional[int] = [] # this list will be returns of the function.
    # potential prime number factors.
    snake_case_ : Optional[Any] = 2
    snake_case_ : List[str] = number
    if number == 0 or number == 1:
        ans.append(_a )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(_a ):
        while quotient != 1:
            if is_prime(_a ) and (quotient % factor == 0):
                ans.append(_a )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(_a )
    # precondition
    assert isinstance(_a , _a ), "'ans' must been from type list"
    return ans
def __lowercase ( _a ):
    """Intended: return the largest prime factor of ``number``.

    NOTE(review): mangled — the body reads ``number``/``ans`` which are never
    bound, and calls ``prime_factorization``, which is not defined in this
    module (all functions were renamed to ``__lowercase``).
    """
    assert isinstance(_a , _a ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    snake_case_ : str = 0
    # prime factorization of 'number'
    snake_case_ : Union[str, Any] = prime_factorization(_a )
    snake_case_ : int = max(_a )
    # precondition
    assert isinstance(_a , _a ), "'ans' must been from type int"
    return ans
def __lowercase ( _a ):
    """Intended: return the smallest prime factor of ``number``.

    NOTE(review): same mangling as the greatest-prime-factor twin above —
    unbound ``number``/``ans`` and a call to the undefined
    ``prime_factorization``.
    """
    assert isinstance(_a , _a ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    snake_case_ : List[Any] = 0
    # prime factorization of 'number'
    snake_case_ : Union[str, Any] = prime_factorization(_a )
    snake_case_ : int = min(_a )
    # precondition
    assert isinstance(_a , _a ), "'ans' must been from type int"
    return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _a ), "compare bust been from type bool"
return number % 2 == 0
def __lowercase ( _a ):
assert isinstance(_a , _a ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _a ), "compare bust been from type bool"
return number % 2 != 0
def __lowercase ( _a ):
    """Intended: Goldbach decomposition — two primes summing to an even number > 2.

    NOTE(review): mangled — the body reads ``number``/``prime_numbers``/
    ``len_pn``/``i``/``j``/``loop``/``ans`` which are never bound, and calls
    the undefined ``is_even``/``get_prime_numbers``/``is_prime``. Restore from
    the upstream maths module.
    """
    assert (
        isinstance(_a , _a ) and (number > 2) and is_even(_a )
    ), "'number' must been an int, even and > 2"
    snake_case_ : Optional[Any] = [] # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    snake_case_ : Dict = get_prime_numbers(_a )
    snake_case_ : Optional[int] = len(_a )
    # run variable for while-loops.
    snake_case_ : List[str] = 0
    snake_case_ : Optional[Any] = None
    # exit variable. for break up the loops
    snake_case_ : List[Any] = True
    while i < len_pn and loop:
        snake_case_ : Optional[int] = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                snake_case_ : List[Any] = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(_a , _a )
        and (len(_a ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def __lowercase ( _a , _a ):
assert (
isinstance(_a , _a )
and isinstance(_a , _a )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
snake_case_ : int = 0
while numbera != 0:
snake_case_ : Optional[Any] = numbera % numbera
snake_case_ : Tuple = numbera
snake_case_ : str = rest
# precondition
assert isinstance(_a , _a ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __lowercase ( _a , _a ):
    """Intended: least common multiple via the two prime factorizations.

    NOTE(review): mangled beyond runnability — ``def f(_a, _a)`` is a
    SyntaxError (duplicate parameter), the body reads ``numbera``/
    ``prime_fac_a``/``ans``/``done`` which are never bound, and it calls the
    undefined ``prime_factorization``. Restore from the upstream maths module.
    """
    assert (
        isinstance(_a , _a )
        and isinstance(_a , _a )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."
    snake_case_ : str = 1 # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        snake_case_ : str = prime_factorization(_a )
        snake_case_ : Optional[Any] = prime_factorization(_a )
    elif numbera == 1 or numbera == 1:
        snake_case_ : Dict = []
        snake_case_ : Any = []
        snake_case_ : str = max(_a , _a )
    snake_case_ : Union[str, Any] = 0
    snake_case_ : Optional[Any] = 0
    snake_case_ : Union[str, Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                snake_case_ : int = prime_fac_a.count(_a )
                snake_case_ : Optional[Any] = prime_fac_a.count(_a )
                for _ in range(max(_a , _a ) ):
                    ans *= n
            else:
                snake_case_ : Optional[Any] = prime_fac_a.count(_a )
                for _ in range(_a ):
                    ans *= n
            done.append(_a )
    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            snake_case_ : Any = prime_fac_a.count(_a )
            for _ in range(_a ):
                ans *= n
            done.append(_a )
    # precondition
    assert isinstance(_a , _a ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n >= 0), "'number' must been a positive int"
snake_case_ : List[Any] = 0
snake_case_ : Any = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_a ):
ans += 1
# precondition
assert isinstance(_a , _a ) and is_prime(
_a ), "'ans' must been a prime number and from type int"
return ans
def __lowercase ( _a , _a ):
    """Intended: all primes strictly between two given primes.

    NOTE(review): mangled beyond runnability — ``def f(_a, _a)`` is a
    SyntaxError (duplicate parameter), the body reads ``p_number_a``/
    ``number``/``ans`` which are never bound, and it calls the undefined
    ``is_prime``. Restore from the upstream maths module.
    """
    assert (
        is_prime(_a ) and is_prime(_a ) and (p_number_a < p_number_a)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    snake_case_ : List[str] = p_number_a + 1 # jump to the next number
    snake_case_ : Tuple = [] # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(_a ):
        number += 1
    while number < p_number_a:
        ans.append(_a )
        number += 1
        # fetch the next prime number.
        while not is_prime(_a ):
            number += 1
    # precondition
    assert (
        isinstance(_a , _a )
        and ans[0] != p_number_a
        and ans[len(_a ) - 1] != p_number_a
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n >= 1), "'n' must been int and >= 1"
snake_case_ : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_a )
# precondition
assert ans[0] == 1 and ans[len(_a ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (
number > 1
), "'number' must been an int and >= 1"
snake_case_ : List[Any] = get_divisors(_a )
# precondition
assert (
isinstance(_a , _a )
and (divisors[0] == 1)
and (divisors[len(_a ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __lowercase ( _a , _a ):
assert (
isinstance(_a , _a )
and isinstance(_a , _a )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
snake_case_ : List[Any] = gcd(abs(_a ) , abs(_a ) )
# precondition
assert (
isinstance(_a , _a )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n >= 0), "'n' must been a int and >= 0"
snake_case_ : Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __lowercase ( _a ):
assert isinstance(_a , _a ) and (n >= 0), "'n' must been an int and >= 0"
snake_case_ : List[Any] = 0
snake_case_ : int = 1
snake_case_ : Any = 1 # this will be return
for _ in range(n - 1 ):
snake_case_ : List[Any] = ans
ans += fiba
snake_case_ : Any = tmp
return ans
| 123 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE (IterableDataset ):
    """Minimal iterable dataset that simply replays a pre-built sequence.

    Fixed: the base class was the undefined name ``a__`` (``IterableDataset``
    is imported at the top of this file), and ``__init__`` stored the data in
    a throwaway local instead of ``self.data``, so ``__iter__`` raised
    AttributeError.
    """

    def __init__( self , _UpperCAmelCase):
        # keep the sequence; __iter__ yields it element by element
        self.data = _UpperCAmelCase

    def __iter__( self):
        for element in self.data:
            yield element
def _lowerCAmelCase ( __snake_case : Optional[Any]=True ) -> Union[str, Any]:
    """Build an Accelerator (with the given even_batches flag) for a 2-process run.

    NOTE(review): the Accelerator is bound to a throwaway local while
    ``accelerator`` is read unbound below — restore ``accelerator = ...``.
    """
    __A : Dict = Accelerator(even_batches=__snake_case )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int , __snake_case : int , __snake_case : bool = False ) -> Optional[int]:
    """Create a (map-style or iterable) range dataset and return it prepared by the Accelerator.

    NOTE(review): the four parameters share the name ``__snake_case``
    (SyntaxError); ``dl`` is read unbound because the assignments were mangled.
    Pre-mangling: (accelerator, dataset_size, batch_size, iterable).
    """
    if iterable:
        __A : Optional[int] = DummyIterableDataset(torch.as_tensor(range(__snake_case ) ) )
    else:
        __A : List[str] = TensorDataset(torch.as_tensor(range(__snake_case ) ) )
    __A : Tuple = DataLoader(__snake_case , batch_size=__snake_case )
    __A : Dict = accelerator.prepare(__snake_case )
    return dl
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int , __snake_case : int , __snake_case : List[int] , __snake_case : List[int] , ) -> Optional[Any]:
    """Assert the per-process batch sizes produced by a prepared dataloader.

    NOTE(review): the five parameters share the name ``__snake_case``
    (SyntaxError); ``dl``/``batch_sizes`` and the expected lists are the
    pre-mangling names and are read unbound here.
    """
    __A : Tuple = create_dataloader(accelerator=__snake_case , dataset_size=__snake_case , batch_size=__snake_case )
    __A : str = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def _lowerCAmelCase ( ) -> Optional[int]:
    """With even_batches left at its default, both processes should see equal batch counts and sizes.

    NOTE(review): ``__snake_case`` passed to the verifier is undefined here —
    pre-mangling it was the ``accelerator`` created on the first line.
    """
    __A : Tuple = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        __snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        __snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _lowerCAmelCase ( ) -> Optional[int]:
    """With even_batches disabled, process 1 gets the shorter remainder batches.

    NOTE(review): ``__snake_case`` is undefined here (mangled ``accelerator`` /
    boolean flag).
    """
    __A : Tuple = create_accelerator(even_batches=__snake_case )
    verify_dataloader_batch_sizes(
        __snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        __snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _lowerCAmelCase ( ) -> Dict:
    """join_uneven_inputs lets a DDP model train through unevenly sized process shards.

    NOTE(review): ``accelerator``/``ddp_model``/``output``/``loss``/
    ``batch_idxs`` and the ``__snake_case`` arguments are pre-mangling names,
    read unbound here because every assignment targets the throwaway ``__A``.
    """
    __A : List[str] = create_accelerator(even_batches=__snake_case )
    __A : Optional[int] = torch.nn.Linear(1 , 1 )
    __A : Tuple = accelerator.prepare(__snake_case )
    __A : Tuple = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    __A : str = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(__snake_case ):
            __A : List[Any] = ddp_model(batch[0].float() )
            __A : List[str] = output.sum()
            loss.backward()
            batch_idxs.append(__snake_case )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def _lowerCAmelCase ( __snake_case : Any ) -> Optional[Any]:
    """Joining with non-DDP models should emit an 'only supported for multi-GPU' warning.

    NOTE(review): the body reads ``accelerator`` (presumably the parameter's
    pre-mangling name) and passes ``__snake_case`` where upstream used
    ``record=True`` / the expected warning category.
    """
    with warnings.catch_warnings(record=__snake_case ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , __snake_case )
        assert "only supported for multi-GPU" in str(w[-1].message )
def _lowerCAmelCase ( ) -> Tuple:
    """join_uneven_inputs(even_batches=...) overrides the flag inside the context and restores it after.

    NOTE(review): ``default_even_batches``/``overridden_even_batches``/
    ``accelerator``/``ddp_model``/``train_dl``/``valid_dl`` and the override
    values are pre-mangling names, read unbound because the assignments target
    the throwaway ``__A``.
    """
    __A : Optional[Any] = True
    __A : Union[str, Any] = False
    __A : List[str] = create_accelerator(even_batches=__snake_case )
    __A : str = torch.nn.Linear(1 , 1 )
    __A : Any = accelerator.prepare(__snake_case )
    __A : Dict = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    __A : Any = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
        __A : Optional[Any] = train_dl.batch_sampler.even_batches
        __A : str = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def _lowerCAmelCase ( ) -> str:
    """Overriding even_batches must affect only the batch-sampler dataloader when iterable and map-style loaders are mixed.

    NOTE(review): same mangling as the sibling tests — ``accelerator``/
    ``ddp_model``/``batch_dl`` and the flag values are read unbound.
    """
    __A : Tuple = True
    __A : List[str] = False
    __A : Any = create_accelerator(even_batches=__snake_case )
    __A : Optional[int] = torch.nn.Linear(1 , 1 )
    __A : List[str] = accelerator.prepare(__snake_case )
    create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
    __A : Optional[Any] = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
                __A : Optional[Any] = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def _lowerCAmelCase ( ) -> Union[str, Any]:
    """Overriding even_batches for an iterable dataloader should warn that it is map-style only.

    NOTE(review): ``accelerator``/``ddp_model`` and the ``__snake_case``
    arguments are pre-mangling names, read unbound here.
    """
    __A : str = create_accelerator()
    __A : Tuple = torch.nn.Linear(1 , 1 )
    __A : Dict = accelerator.prepare(__snake_case )
    create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
    with warnings.catch_warnings(record=__snake_case ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
            pass
        assert issubclass(w[-1].category , __snake_case )
        assert "only supported for map-style datasets" in str(w[-1].message )
def _lowerCAmelCase ( ) -> Optional[int]:
    """Driver: run every even_batches test in sequence on a 2-process launch.

    NOTE(review): ``accelerator`` and ``original_state`` are read unbound
    (mangled assignments), and ``main`` in the guard below is the
    pre-mangling name of this function.
    """
    __A : Dict = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled' )
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs' )
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs' )
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning' )
    # Temporarily fake an FSDP distributed type to trigger the warning path,
    # then restore the real state.
    __A : int = accelerator.state.distributed_type
    __A : List[Any] = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(__snake_case )
    __A : Tuple = original_state


if __name__ == "__main__":
    main()
'''simple docstring'''
import sys
import turtle
def _lowerCAmelCase ( __snake_case : tuple[float, float] , __snake_case : tuple[float, float] ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def _lowerCAmelCase ( __snake_case : tuple[float, float] , __snake_case : tuple[float, float] , __snake_case : tuple[float, float] , __snake_case : int , ) -> None:
    """Draw one Sierpinski-triangle level with the turtle pen, then recurse on the three corner triangles.

    NOTE(review): four parameters share one name (SyntaxError); ``vertexa``/
    ``depth`` are read unbound, and the recursion targets ``triangle``/
    ``get_mid`` — the pre-mangling names of this function and the midpoint
    helper above — which are undefined at runtime.
    """
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(__snake_case , get_mid(__snake_case , __snake_case ) , get_mid(__snake_case , __snake_case ) , depth - 1 )
    triangle(__snake_case , get_mid(__snake_case , __snake_case ) , get_mid(__snake_case , __snake_case ) , depth - 1 )
    triangle(__snake_case , get_mid(__snake_case , __snake_case ) , get_mid(__snake_case , __snake_case ) , depth - 1 )
if __name__ == "__main__":
    # CLI: fractal depth is the single required argument.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # NOTE(review): the turtle is bound to `lowercase__` but used as `my_pen`,
    # and `triangle`/`vertices` are the pre-mangling names of the mangled
    # function and list bindings.
    lowercase__ : Any = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    lowercase__ : Optional[Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
'''simple docstring'''
def snake_case_ (UpperCamelCase : int = 10**9 ):
    """Intended: sum qualifying triangle perimeters up to the given limit (Project-Euler-style recurrence).

    NOTE(review): mangled beyond runnability — all five initialisers bind
    ``_a`` while ``perimeter``/``prev_value``/``value``/``i``/
    ``perimeters_sum`` are read unbound, the parameter ``UpperCamelCase`` is
    never used (the loop reads ``max_perimeter``), and the guard below calls
    the undefined ``solution``. Restore the original names before use.
    """
    _a = 1
    _a = 2
    _a = 0
    _a = 0
    _a = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        _a = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 22 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast pipeline test: ScoreSdeVePipeline with a tiny random UNet.

    Checks both the plain and the ``return_dict=False`` call paths produce the
    expected 32x32 image slice.
    """

    @property
    def a ( self ):
        # Small deterministic UNet used as the dummy unconditional model.
        torch.manual_seed(0 )
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def a ( self ):
        # NOTE(review): `model`, `sde_ve`, `image`, `image_from_tuple` etc. are
        # read unbound — the assignments were mangled to `_UpperCamelCase`,
        # and `A_` in the calls was presumably `torch_device`/`generator`.
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = ScoreSdeVeScheduler()
        _UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
        sde_ve.to(A_ )
        sde_ve.set_progress_bar_config(disable=A_ )
        _UpperCamelCase = torch.manual_seed(0 )
        _UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ ).images
        _UpperCamelCase = torch.manual_seed(0 )
        _UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ , return_dict=A_ )[
            0
        ]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: ScoreSdeVePipeline with the pretrained ncsnpp-church-256 model."""

    def a ( self ):
        # NOTE(review): `model_id`, `unet`, `scheduler`, `sde_ve`, `image` are
        # read unbound — assignments were mangled to `_UpperCamelCase`, and
        # `A_` in the calls was presumably `torch_device`/`generator`/`model_id`.
        _UpperCamelCase = "google/ncsnpp-church-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(A_ )
        _UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(A_ )
        _UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
        sde_ve.to(A_ )
        sde_ve.set_progress_bar_config(disable=A_ )
        _UpperCamelCase = torch.manual_seed(0 )
        _UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A_ ).images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 138 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
# Name of the sentencepiece model file inside a checkpoint directory.
A_ = {"vocab_file": "spiece.model"}
# Download locations of the pretrained CPM sentencepiece vocab.
# NOTE(review): all three module constants rebind `A_`; the tokenizer class
# below presumably expects the conventional VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP names.
A_ = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class _snake_case ( _a ):
    """SentencePiece-based CPM tokenizer (jieba pre-segmentation, XLNet-style specials).

    NOTE(review): the class and base names are obfuscation residue — ``_a`` is
    presumably ``PreTrainedTokenizer``; confirm against the upstream source.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """Build the tokenizer from a SentencePiece model file.

        The obfuscated original declared every parameter with the same name —
        a SyntaxError. Parameter names are restored from the attribute
        assignments the body already performed (``do_lower_case``,
        ``remove_space``, ``keep_accents``, ``vocab_file``, ``sp_model_kwargs``).
        """
        # The mask token behaves like a normal word that absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        # XLNet-style segment id used for padding positions (see the literal 3 in the original).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        # Maps space -> U+2582 and newline -> U+2583 (CPM whitespace encoding, undone in _decode).
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __UpperCamelCase ( self : List[str] ):
return len(self.sp_model )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:List[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[str] = self.__dict__.copy()
SCREAMING_SNAKE_CASE:Optional[int] = None
return state
def __setstate__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:Dict = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
SCREAMING_SNAKE_CASE:Optional[Any] = {}
SCREAMING_SNAKE_CASE:int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : int ):
if self.remove_space:
SCREAMING_SNAKE_CASE:Tuple = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE:List[str] = inputs
SCREAMING_SNAKE_CASE:Optional[int] = outputs.replace("``" ,"\"" ).replace("''" ,"\"" )
if not self.keep_accents:
SCREAMING_SNAKE_CASE:str = unicodedata.normalize("NFKD" ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = "".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE:Tuple = outputs.lower()
return outputs
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str ):
SCREAMING_SNAKE_CASE:Dict = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE:Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,"" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE:Tuple = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE:Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE__ )
else:
new_pieces.append(SCREAMING_SNAKE_CASE__ )
return new_pieces
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[Any] = "".join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ ," " ).strip()
return out_string
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE:int = [self.sep_token_id]
SCREAMING_SNAKE_CASE:List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is not None:
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1]
return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1]
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE:Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE:int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE:Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ ,"wb" ) as fi:
SCREAMING_SNAKE_CASE:Dict = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __UpperCamelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:Tuple = super()._decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = text.replace(" " ,"" ).replace("\u2582" ," " ).replace("\u2583" ,"\n" )
return text
| 465 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
A_ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
A_ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key/system CoNLL lines into the cluster structures CoVal's evaluator needs.

    The obfuscated definition declared every parameter as ``snake_case`` (a
    SyntaxError); names are restored from the body's own references
    (``NP_only``, ``min_span``, ``remove_nested``, ``keep_singletons``) and the
    name is restored from the ``get_coref_infos`` call in ``evaluate`` below.

    Returns:
        dict mapping ``doc`` to
        ``(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)``.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # NOTE(review): upstream CoVal deliberately passes the *key* document
        # lines here as well (the gold parses guide the system-side spans).
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    # The obfuscated version rebound a plain variable here, so nothing ever
    # reached the returned dict; store the tuple under the document key.
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            "Number of resulting singleton clusters in the key "
            F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
    if not keep_singletons:
        logger.info(
            F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            "files, respectively" )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each ``(name, metric)`` pair over the parsed documents.

    Parameter names are restored (the obfuscated definition repeated
    ``snake_case`` — a SyntaxError); the function name is grounded by the
    keyword call ``evaluate(key_lines=..., ...)`` in the metric class below.

    Returns:
        dict with ``{name}/recall``, ``{name}/precision``, ``{name}/f1`` per
        metric, plus ``conll_score`` when muc, bcub and ceafe were all run.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
        logger.info(
            name.ljust(10),
            F'''Recall: {recall * 100:.2f}''',
            F''' Precision: {precision * 100:.2f}''',
            F''' F1: {fa * 100:.2f}''',
        )
    if conll_subparts_num == 3:
        # Average the three CoNLL sub-scores onto a 0-100 scale.
        conll = (conll / 3) * 100
        logger.info(F'''CoNLL score: {conll:.2f}''')
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True iff the key CoNLL lines carry a gold parse column.

    A line counts when it is not a ``#`` comment, has more than six
    space-separated columns, and its sixth column (the parse bit) is not "-".
    Scanning stops at the first data line with too few columns. The obfuscated
    version named its parameter ``snake_case`` while the body read the
    undefined ``key_lines`` — restored here, together with the locals.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                # A data line without a parse column means no gold parses at all.
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """CoVal coreference metric (MUC, B-cubed, CEAFe, LEA, CoNLL average).

    NOTE(review): the obfuscated source named both hooks ``__UpperCamelCase``
    (the second shadowed the first) and repeated one parameter name in
    ``_compute`` (a SyntaxError); the ``datasets.Metric`` hook names and the
    keyword parameters are restored here.
    """

    def _info(self):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Score ``predictions`` against ``references`` (lists of CoNLL lines).

        Raises:
            NotImplementedError: if ``min_span`` is requested but the
                references carry no gold parse annotation.
        """
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 465 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module state. NOTE(review): the obfuscated source bound every one of these
# to the same name ``a``; the names below are restored from how the rest of
# this module reads them (``_lock``, ``_default_handler``, ``log_levels``,
# ``_default_log_level`` and ``_tqdm_active`` are all referenced further down).
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

# Accepted values for the TRANSFORMERS_VERBOSITY environment variable.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True

# Backward-compat alias for the obfuscated binding (the last value it held here).
a = _tqdm_active
def _get_default_logging_level():
    """Return the default log level, honouring TRANSFORMERS_VERBOSITY when set.

    The obfuscated version passed the undefined name ``__UpperCAmelCase`` as
    the ``os.getenv`` default (a NameError at call time); ``None`` restored.
    Function names in this span are grounded by the calls elsewhere in the
    module (``_get_default_logging_level()``, ``_get_library_name()``,
    ``_get_library_root_logger()``).
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level


def _get_library_name() -> str:
    """Top-level package name of this module."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    """Root logger for the library's namespace."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Install the library's stderr handler exactly once (thread-safe).

    The obfuscated body declared ``global _default_handler`` but then assigned
    every value to a mangled local, so the handler was never stored and the
    ``.flush`` / ``.propagate`` attribute assignments were lost; all restored.
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        # Flush through the real stderr so redirected streams behave.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Remove the library handler and reset the root logger's level."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Expose the name -> level mapping accepted by TRANSFORMERS_VERBOSITY."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with ``name`` (defaults to the library's top-level name).

    Bug fixed: the obfuscated version assigned the library name to a dead
    local and then called ``logging.getLogger(None)``, always returning the
    process root logger instead of the library logger.
    """
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Current effective level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the library root logger's level (called by the set_verbosity_* helpers below)."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set library verbosity to INFO."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set library verbosity to WARNING."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set library verbosity to DEBUG."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set library verbosity to ERROR."""
    return set_verbosity(ERROR)


# NOTE(review): each obfuscated wrapper called set_verbosity with the
# undefined name ``__UpperCAmelCase``; the level constants (imported from
# ``logging`` at the top of this file) are restored in the upstream order —
# info, warning, debug, error. Confirm the mapping against the original.


def disable_default_handler() -> None:
    """Detach the library's default handler from the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the library's default handler to the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Attach ``handler`` to the library root logger.

    Parameter name restored from the body's own ``handler`` references (the
    obfuscated signature declared ``__UpperCAmelCase`` and never used it).
    """
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Detach ``handler`` from the library root logger."""
    _configure_library_root_logger()
    # NOTE(review): the membership condition below is kept exactly as in the
    # source; verify the intended polarity against the upstream module.
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Stop library log records propagating to ancestor loggers."""
    _configure_library_root_logger()
    # The obfuscated body assigned False to a dead local; the ``.propagate``
    # attribute assignment is restored.
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers again."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Give every library handler an explicit [level|file:line] time formatter."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        # The obfuscated version built the formatter into a dead local and then
        # passed an undefined name to setFormatter; fixed.
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every library handler back to the default (no) formatter."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """``Logger.warning`` that can be silenced via TRANSFORMERS_NO_ADVISORY_WARNINGS.

    Bound onto ``logging.Logger`` below (the ``self`` first parameter shows
    these are Logger methods). The obfuscated version passed the undefined
    name ``__UpperCAmelCase`` as the ``os.getenv`` default; ``False`` restored.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a given warning only once per unique call signature.

    The obfuscated decorator was ``lru_cache(__UpperCAmelCase)`` — a NameError
    at import time; the unbounded cache (``None``) is restored.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that swallows every call (used when progress bars are disabled).

    Class name restored: both classes in the obfuscated source were named
    ``__a`` (the second shadowed the first), while ``EmptyTqdm`` and
    ``_tqdm_cls`` were referenced by these names — unresolvable NameErrors.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any missing attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    """Factory yielding a real tqdm or an EmptyTqdm depending on ``_tqdm_active``."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        # Method names restored from the ``tqdm_lib.tqdm.set_lock/get_lock``
        # delegations (both were mangled to the same shadowing name).
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()

# Backward-compat alias for the obfuscated module-level binding.
a = tqdm
def is_progress_bar_enabled() -> bool:
    """True when library progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars here and in huggingface_hub.

    Bug fixed: the obfuscated body declared ``global _tqdm_active`` but then
    assigned a mangled local, so the flag never actually changed.
    """
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 109 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __a ( unittest.TestCase ):
    def UpperCAmelCase__ ( self : Any ):
        """setUp-style hook; the counter below is obfuscation residue (never read)."""
        __SCREAMING_SNAKE_CASE = 0

    def UpperCAmelCase__ ( self : str ):
        """Load a CLIP image processor from the Hub by model id.

        NOTE(review): locals throughout this class are machine-mangled — every
        ``__SCREAMING_SNAKE_CASE`` rebinds one name and the ``lowerCamelCase``
        placeholders are undefined; compare with the original transformers
        test before trusting behaviour.
        """
        __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.assertIsInstance(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Dict ):
        """Load from a local dir whose preprocessor config names an image_processor_type."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
            self.assertIsInstance(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Same as above, but via a legacy feature_extractor_type key."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
            self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
    def UpperCAmelCase__ ( self : List[str] ):
        """config.json alone (without image_processor_type) must suffice for loading.

        NOTE(review): mangled locals — the ``lowerCamelCase`` placeholders and
        rebinding of ``__SCREAMING_SNAKE_CASE`` hide which value each call
        originally received; reconcile with the upstream test.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ).to_dict()
            config_dict.pop("""image_processor_type""" )
            __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**lowerCamelCase )
            # save in new folder
            model_config.save_pretrained(lowerCamelCase )
            config.save_pretrained(lowerCamelCase )
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
            # make sure private variable is not incorrectly saved
            __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """A preprocessor_config.json alone (no config.json) must also load."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
            self.assertIsInstance(lowerCamelCase ,lowerCamelCase )

    def UpperCAmelCase__ ( self : Dict ):
        """An invalid model identifier must raise with a helpful message."""
        with self.assertRaisesRegex(
            lowerCamelCase ,"""clip-base is not a local folder and is not a valid model identifier""" ):
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""clip-base""" )

    def UpperCAmelCase__ ( self : Any ):
        """An invalid revision must raise with a helpful message."""
        with self.assertRaisesRegex(
            lowerCamelCase ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,revision="""aaaaaa""" )

    def UpperCAmelCase__ ( self : List[Any] ):
        """A repo without a preprocessor_config.json must raise."""
        with self.assertRaisesRegex(
            lowerCamelCase ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def UpperCAmelCase__ ( self : Any ):
        """Remote-code processors load only with trust_remote_code and reload from disk."""
        with self.assertRaises(lowerCamelCase ):
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCamelCase ):
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
        __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
        self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(lowerCamelCase )
            __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,trust_remote_code=lowerCamelCase )
            self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
class __a ( _snake_case ):
__UpperCamelCase : int = True
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 109 | 1 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number):
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    Renamed from the mangled ``_A`` so the call inside ``solution`` resolves.
    """
    if max_number < 2:  # isqrt(max_number - 1) would fail for 0; nothing to sieve
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Step by i: the original stepped by max_number, which only
            # cleared i*i and left every other multiple marked prime.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number=10**8):
    """Project Euler 187: count composites below ``max_number`` that have exactly
    two (not necessarily distinct) prime factors.

    Two-pointer sweep over the ascending prime list: for each smallest factor
    ``prime_numbers[left]``, every prime up to ``prime_numbers[right]`` is an
    admissible cofactor once the product fits below ``max_number``.
    """
    # The largest possible prime factor pairs with 2, so only primes
    # p < max_number / 2 matter; (max_number + 1) // 2 keeps the boundary
    # prime for odd limits as well (the original dropped it).
    prime_numbers = calculate_prime_numbers((max_number + 1) // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink `right` until the product of the two pointed-at primes fits.
        while right >= left and prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        if right < left:
            break  # no admissible cofactor remains for this or any larger left prime
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 624 |
'''simple docstring'''
def triangle_number_generator():
    """Yield successive triangle numbers n*(n+1)//2 for n = 1 .. 999_999.

    Renamed from the mangled ``_A`` so the call inside ``solution`` resolves.
    """
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Return the number of divisors of ``n`` via trial-division factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # A leftover factor > sqrt(original n) is prime with exponent 1.
        divisors_count *= 2
    return divisors_count


def solution(limit=500):
    """Project Euler 12: first triangle number with more than ``limit`` divisors.

    ``limit`` was a hard-coded 500; it is now a parameter with that default,
    so the original call ``solution()`` is unchanged.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > limit)


if __name__ == "__main__":
    print(solution())
| 624 | 1 |
# Hand-picked, strictly decreasing timestep schedules (each ends at 0),
# written compactly instead of one value per line.
# NOTE(review): every assignment below rebinds the same name, so only the
# final 100-entry schedule is observable after import — these were
# presumably meant to have distinct names; confirm against the source module.
lowercase_ = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
lowercase_ = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
lowercase_ = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
lowercase_ = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
lowercase_ = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
lowercase_ = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
lowercase_ = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
lowercase_ = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
| 154 |
def greatest_common_divisor(a, b):
    """Return gcd(a, b) using the recursive form of Euclid's algorithm.

    The original definition used the same mangled name for both parameters
    (a SyntaxError) and called itself by a name that was never defined.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x, y):
    """Return gcd(x, y) using the iterative form of Euclid's algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Read two comma-separated integers from stdin and print both GCD results."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        # The original printed {num_a} twice in both messages; the second
        # placeholder must be the second operand.
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
# Eight strictly decreasing timestep schedules (each ends at 0), written
# compactly instead of one value per line.
# NOTE(review): all eight assignments rebind the same name `__snake_case`,
# so only the final 100-entry schedule survives after import, and the
# annotations (Dict, Tuple, ...) do not describe the list values — they are
# kept only to preserve the original runtime behaviour; confirm that the
# `typing` names are in scope at the top of the file.
__snake_case : Dict = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
__snake_case : Tuple = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
__snake_case : Tuple = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
__snake_case : List[Any] = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
__snake_case : Optional[int] = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
__snake_case : Any = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
__snake_case : Dict = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
__snake_case : Any = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# Repo-relative paths used by the table checker below. The original bound
# all three values to the same mangled name `__snake_case`, while the code
# further down references TRANSFORMERS_PATH and PATH_TO_DOCS.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Find the start prompt.
A = 0
while not lines[start_index].startswith(lowerCAmelCase__ ):
start_index += 1
start_index += 1
A = start_index
while not lines[end_index].startswith(lowerCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names. The four bindings below were all
# collapsed onto one mangled name `__snake_case`; the table builder references
# them as _re_tf_models / _re_flax_models / _re_pt_models / transformers_module.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase *identifier* into its capitalized word parts.

    Renamed from the mangled ``lowerCamelCase_`` so the call inside the table
    builder resolves.
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def lowerCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Tuple:
'''simple docstring'''
A = 2 if text == '✅' or text == '❌' else len(lowerCAmelCase__ )
A = (width - text_length) // 2
A = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the markdown support table (model x slow/fast tokenizer, PT/TF/Flax).

    Every local in the original was collapsed onto the single mangled name
    ``A``; the variables are reconstructed from the expressions that consume
    them (``config_maping_names``, the per-framework defaultdicts, etc.).
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Compare the auto-generated model table with the one in ``index.md``.

    When *overwrite* is True the file is rewritten in place; otherwise a
    mismatch raises ValueError. The original unpacked all four results of
    ``_find_text_in_file`` onto the single mangled name ``A``; the distinct
    names below are reconstructed from how they are used.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # CLI entry point: pass --fix_and_overwrite to rewrite index.md in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import machinery: heavy framework submodules are only imported on
# first attribute access. The original rebound a single `_lowercase` name
# four times (destroying the import structure), referenced an undefined
# `_import_structure`, and never installed the lazy module in sys.modules.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
    )
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape Google Images for *query* and download up to *max_images* full-size hits.

    Files are written to ``query_<query>/original_size_img_<i>.jpg``. Returns 0
    when the results page yields no image data, otherwise the last processed
    index (original behaviour kept; NOTE(review): arguably this should be a
    count, i.e. ``index + 1`` — confirm before relying on the return value).

    The original definition used the same mangled name for both parameters
    (a SyntaxError) and left the headers dict anonymous.
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    # Round-trip through json to unescape the embedded payload.
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0  # guard against an empty URL list (original could hit NameError)
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs are doubly escaped in the page source: decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
    # CLI entry point: first argument is the search term.
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 118 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase_ ( UpperCamelCase__ ):
    """Config tester: checks that a MobileViT config exposes its key attributes."""

    def _snake_case ( self :Dict ) -> int:
        """Instantiate the config and assert the expected attributes exist."""
        config = self.config_class(**self.inputs_dict )
        for attribute in ("""hidden_sizes""", """neck_hidden_sizes""", """num_attention_heads"""):
            self.parent.assertTrue(hasattr(config , attribute ) )
class UpperCamelCase_ :
    """Model tester: builds tiny MobileViT configs/inputs and runs shape checks.

    NOTE(review): the identifiers in this class are mangled — every local is
    bound to `SCREAMING_SNAKE_CASE__`, the __init__ parameter list repeats the
    name `__A` (a SyntaxError), and the bodies then read the intended original
    names (`parent`, `batch_size`, `config_and_inputs`, ...). Restore the real
    parameter/variable names before running.
    """

    def __init__( self :List[str] , __A :int , __A :List[Any]=13 , __A :Tuple=32 , __A :Optional[Any]=2 , __A :Optional[Any]=3 , __A :Tuple=640 , __A :Optional[int]=4 , __A :Optional[int]="silu" , __A :Any=3 , __A :List[str]=32 , __A :List[Any]=0.1 , __A :Any=0.1 , __A :Union[str, Any]=0.1 , __A :str=0.0_2 , __A :List[Any]=True , __A :Tuple=True , __A :Optional[int]=10 , __A :Dict=None , ) -> str:
        '''Store the test hyper-parameters (batch size, image size, dropout, ...).'''
        SCREAMING_SNAKE_CASE__ = parent
        SCREAMING_SNAKE_CASE__ = batch_size
        SCREAMING_SNAKE_CASE__ = image_size
        SCREAMING_SNAKE_CASE__ = patch_size
        SCREAMING_SNAKE_CASE__ = num_channels
        SCREAMING_SNAKE_CASE__ = last_hidden_size
        SCREAMING_SNAKE_CASE__ = num_attention_heads
        SCREAMING_SNAKE_CASE__ = hidden_act
        SCREAMING_SNAKE_CASE__ = conv_kernel_size
        SCREAMING_SNAKE_CASE__ = output_stride
        SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ = classifier_dropout_prob
        SCREAMING_SNAKE_CASE__ = use_labels
        SCREAMING_SNAKE_CASE__ = is_training
        SCREAMING_SNAKE_CASE__ = num_labels
        SCREAMING_SNAKE_CASE__ = initializer_range
        SCREAMING_SNAKE_CASE__ = scope

    def _snake_case ( self :str ) -> int:
        '''Create random pixel values (and labels when use_labels) plus a config.'''
        SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__ = None
        SCREAMING_SNAKE_CASE__ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels )
            SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        SCREAMING_SNAKE_CASE__ = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def _snake_case ( self :Tuple ) -> int:
        '''Build a MobileViTConfig from the stored hyper-parameters.'''
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def _snake_case ( self :int , __A :int , __A :List[Any] , __A :Optional[Any] , __A :Dict ) -> Optional[int]:
        '''Check the base model's last_hidden_state shape (spatial dims / output_stride).'''
        SCREAMING_SNAKE_CASE__ = MobileViTModel(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(__A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def _snake_case ( self :int , __A :str , __A :str , __A :List[str] , __A :Any ) -> str:
        '''Check the image-classification head returns (batch, num_labels) logits.'''
        SCREAMING_SNAKE_CASE__ = self.num_labels
        SCREAMING_SNAKE_CASE__ = MobileViTForImageClassification(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self :Any , __A :Any , __A :Union[str, Any] , __A :List[Any] , __A :List[Any] ) -> Union[str, Any]:
        '''Check the segmentation head's logits shape, with and without labels.'''
        SCREAMING_SNAKE_CASE__ = self.num_labels
        SCREAMING_SNAKE_CASE__ = MobileViTForSemanticSegmentation(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(__A )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def _snake_case ( self :Dict ) -> List[Any]:
        '''Split prepare_config_and_inputs into (config, inputs_dict) for the common tests.'''
        SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
        SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Common ModelTester / pipeline-mixin test suite for MobileViT.

    NOTE(review): identifiers here are mangled — every class attribute below
    is bound to the same name `lowerCamelCase_` (so only the last assignment
    survives), every method shares the name `_snake_case`, and locals are all
    `SCREAMING_SNAKE_CASE__`/`__A`. Restore the original names
    (all_model_classes, pipeline_model_mapping, test_* flags, the per-test
    method names) before running.
    """

    lowerCamelCase_ = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    lowerCamelCase_ = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False

    def _snake_case ( self :str ) -> Union[str, Any]:
        '''Set up the model tester and config tester.'''
        SCREAMING_SNAKE_CASE__ = MobileViTModelTester(self )
        SCREAMING_SNAKE_CASE__ = MobileViTConfigTester(self , config_class=__A , has_text_modality=__A )

    def _snake_case ( self :List[str] ) -> Union[str, Any]:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
    def _snake_case ( self :Tuple ) -> str:
        '''Skipped: MobileViT is convolutional and consumes pixel_values only.'''
        pass

    @unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
    def _snake_case ( self :Dict ) -> Dict:
        '''Skipped: no token embedding tables to get/set.'''
        pass

    @unittest.skip(reason="""MobileViT does not output attentions""" )
    def _snake_case ( self :Dict ) -> Optional[int]:
        '''Skipped: the model does not return attention maps.'''
        pass

    def _snake_case ( self :Optional[Any] ) -> Any:
        '''Check every model class's forward signature starts with `pixel_values`.'''
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = model_class(__A )
            SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __A )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _snake_case ( self :str ) -> Any:
        '''Temporarily skipped (model too large for the common tests).'''
        pass

    def _snake_case ( self :Optional[Any] ) -> List[Any]:
        '''Exercise the base-model shape check.'''
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    def _snake_case ( self :Any ) -> str:
        '''Check hidden-state shapes: 5 stages with spatial dims halved per stage.'''
        def check_hidden_states_output(__A :Union[str, Any] , __A :Any , __A :Any ):
            SCREAMING_SNAKE_CASE__ = model_class(__A )
            model.to(__A )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__A , __A ) )
            SCREAMING_SNAKE_CASE__ = outputs.hidden_states
            SCREAMING_SNAKE_CASE__ = 5
            self.assertEqual(len(__A ) , __A )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            SCREAMING_SNAKE_CASE__ = 2
            for i in range(len(__A ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = True
            check_hidden_states_output(__A , __A , __A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE__ = True
            check_hidden_states_output(__A , __A , __A )

    def _snake_case ( self :List[str] ) -> Dict:
        '''Exercise the image-classification head check.'''
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )

    def _snake_case ( self :Any ) -> Optional[Any]:
        '''Exercise the semantic-segmentation head check.'''
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__A )

    @slow
    def _snake_case ( self :List[Any] ) -> Union[str, Any]:
        '''Smoke-test loading the first pretrained checkpoint from the hub.'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ = MobileViTModel.from_pretrained(__A )
            self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE__ ( ):
    """Load the COCO sample fixture image used by the integration tests below."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    @cached_property
    def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
        '''The default MobileViT image processor, or None when vision deps are missing.'''
        return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
    @slow
    def _snake_case ( self :int ) -> Optional[Any]:
        '''Integration test: xx-small MobileViT classification logits on the fixture image.'''
        # NOTE(review): the `__A` occurrences below are an undefined (mangled)
        # name — the `.to(...)` calls presumably want `torch_device`, the
        # assertion arguments the locals assigned just above; restore before running.
        SCREAMING_SNAKE_CASE__ = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(__A )
        SCREAMING_SNAKE_CASE__ = self.default_image_processor
        SCREAMING_SNAKE_CASE__ = prepare_img()
        SCREAMING_SNAKE_CASE__ = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ = model(**__A )
        # verify the logits
        SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __A )
        SCREAMING_SNAKE_CASE__ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
@slow
def _snake_case ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ = model.to(__A )
SCREAMING_SNAKE_CASE__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__A )
SCREAMING_SNAKE_CASE__ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1E-4 ) )
@slow
def _snake_case ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ = model.to(__A )
SCREAMING_SNAKE_CASE__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__A )
SCREAMING_SNAKE_CASE__ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __A )
SCREAMING_SNAKE_CASE__ = image_processor.post_process_semantic_segmentation(outputs=__A )
SCREAMING_SNAKE_CASE__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __A ) | 59 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Task template mapping a dataset's columns onto the image-classification schema.

    Fixes: the class attributes were all bound to one duplicated placeholder name
    (so `self.label_column`, `self.label_schema` and `self.image_column`, read by
    the methods below, never existed), the `isinstance` check used an undefined
    placeholder instead of `ClassLabel`, and the frozen-dataclass update in
    `align_with_features` had collapsed into dead local assignments followed by a
    NameError on return.
    """

    # Included in asdict() output even when left at its default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if ``label_column`` is absent or is not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to bypass the frozen check.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset columns onto the canonical schema names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds a tiny random MobileBERT config plus inputs and checks every task head.

    Fixes: the class was named with a placeholder that a later class definition
    clobbered (while `setUp` below references `MobileBertModelTester`); `__init__`
    had duplicate parameter names (a SyntaxError) and bound every value to one
    throwaway local instead of instance attributes; every method's locals were
    collapsed to a single reused name while subsequent statements referenced the
    intended names (`config`, `input_ids`, `result`, ...), which would raise
    NameError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for the tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a tiny MobileBertConfig mirroring the tester's hyperparameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check base-model output shapes with and without optional masks."""
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head's logits shape."""
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the next-sentence-prediction head's binary logits shape."""
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check both pre-training heads (MLM + next-sentence) output shapes."""
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head's start/end logits shapes."""
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head after tiling inputs across choices."""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard ModelTester-driven unit tests for MobileBERT.

    Fixes: the class shared a placeholder name with two other classes (later
    definitions clobbered it), its three class attributes were all assigned to
    one duplicated name (only the last survived, losing `all_model_classes` and
    `pipeline_model_mapping` that the mixins require), `_prepare_for_class` had
    duplicate parameter names (a SyntaxError), and every test method was named
    `__a` so only one ever existed and none were unittest-discoverable.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original attribute name was lost to obfuscation; `fp16`
    # matches the upstream MobileBERT test suite — confirm against the mixin.
    fp16 = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input prep with the extra labels pre-training needs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # Pre-training needs both MLM labels and a next-sentence label.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a LongTensor on the test device from a nested list of token ids.

    Fixes: the helper was defined under a placeholder name although the
    integration test below calls it as ``_long_tensor``, and the token list was
    also passed as the ``device`` argument instead of ``torch_device``.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


# Relative tolerance for the ratio-based comparison in the integration test
# below (which reads the name TOLERANCE, not a placeholder).
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration test against the published google/mobilebert-uncased checkpoint.

    Fixes: the class name duplicated another class (clobbering it), the method
    was not `test_*`-discoverable, and every local was bound to one reused
    placeholder so `lower_bound`/`upper_bound` and the shape assertion referenced
    names that were never defined.
    """

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit — exact erf form (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh-based GELU approximation used by the original BERT/GPT code."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10] (quantization-friendly range)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis` and return a * sigmoid(b).

    (The original definition had two parameters with the same name — a
    SyntaxError — and all module-level functions shared one name, so each
    definition clobbered the previous one.)
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Keras-native tanh-approximate GELU (TF >= 2.4 only)."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

# Name -> activation mapping; the accessor below reads this dict, which the
# original code assigned to an unrelated placeholder name.
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up a TF activation function by name.

    Raises:
        KeyError: if `activation_string` is not a known activation.
    """
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def lowercase__ ( self ) -> List[str]:
        """Per-test set-up hook.

        NOTE(review): as written this assigns 0 to a throwaway local, which has
        no effect; the target of the assignment appears to have been lost in
        obfuscation (upstream this disables the remote-code prompt timeout).
        Verify against the original test module before relying on it.
        """
        snake_case__ : Optional[int] = 0
@slow
def lowercase__ ( self ) -> int:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
# Check that tokenizer_type ≠ model_type
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , config=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : int = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
with pytest.raises(lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case__ : List[str] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCamelCase , lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
snake_case__ : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] = TOKENIZER_MAPPING.values()
snake_case__ : Dict = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase ) , lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowerCamelCase )
snake_case__ : List[Any] = '''Hello, world. How are you?'''
snake_case__ : Dict = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
snake_case__ : Dict = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowerCamelCase )
snake_case__ : int = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = get_tokenizer_config('''bert-base-cased''' )
snake_case__ : Dict = config.pop('''_commit_hash''' , lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case__ : int = get_tokenizer_config(lowerCamelCase )
self.assertDictEqual(lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Dict = get_tokenizer_config(lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
snake_case__ : Dict = CustomTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCamelCase , slow_tokenizer_class=lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = BertTokenizerFast.from_pretrained(lowerCamelCase )
bert_tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> Optional[Any]:
    """A tokenizer whose class ships as remote code on the Hub only loads with
    trust_remote_code enabled, and round-trips through save/reload for both the
    fast and (when tokenizers is available) the slow class.

    NOTE(review): results are assigned to the mangled name ``snake_case__`` but
    asserted via ``tokenizer``/``reloaded_tokenizer`` -- identifier-rewrite
    artifact; restore the original names before running.
    """
    with self.assertRaises(lowerCamelCase ):
        snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
    # If remote code is disabled, we can't load this config.
    with self.assertRaises(lowerCamelCase ):
        snake_case__ : Dict = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
    snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
    self.assertTrue(tokenizer.special_attribute_present )
    # Test tokenizer can be reloaded.
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(lowerCamelCase )
        snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase )
    self.assertTrue(reloaded_tokenizer.special_attribute_present )
    if is_tokenizers_available():
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        # Test we can also load the slow version
        snake_case__ : Dict = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase )
            snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
        self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
    else:
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
    """When a tokenizer exists both locally (registered) and as remote code on the
    Hub, ``trust_remote_code`` selects the implementation: unset/False -> local
    classes (special attribute absent), True -> Hub version (attribute present)."""
    class snake_case ( __lowerCamelCase ):
        """Local slow stand-in; its flag stays False so it is distinguishable
        from the Hub implementation."""
        _lowerCAmelCase = False

    class snake_case ( __lowerCamelCase ):
        """Local fast stand-in paired with the slow class above.

        NOTE(review): both helper classes were renamed to ``snake_case`` by the
        identifier rewrite, so the second shadows the first; both class bodies
        also bind ``_lowerCAmelCase`` twice. Confirm the intended names
        (NewTokenizer / NewTokenizerFast with slow_tokenizer_class and a False
        flag) against the upstream test.
        """
        _lowerCAmelCase = NewTokenizer
        _lowerCAmelCase = False

    try:
        AutoConfig.register('''custom''' , lowerCamelCase )
        AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
        AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
        # If remote code is not set, the default is to use local
        snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        self.assertFalse(tokenizer.special_attribute_present )
        snake_case__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowerCamelCase )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        self.assertFalse(tokenizer.special_attribute_present )
        # If remote code is disabled, we load the local one.
        snake_case__ : Any = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        self.assertFalse(tokenizer.special_attribute_present )
        snake_case__ : int = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        self.assertFalse(tokenizer.special_attribute_present )
        # If remote is enabled, we load from the Hub
        snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        self.assertTrue(tokenizer.special_attribute_present )
        snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        self.assertTrue(tokenizer.special_attribute_present )
    finally:
        # Clean the global registries so other tests are unaffected.
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in TOKENIZER_MAPPING._extra_content:
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> str:
    """Legacy remote-code repos (old single-class layout) still load with
    trust_remote_code, in both the fast and the slow variant.

    NOTE(review): results are assigned to ``snake_case__`` but asserted via
    ``tokenizer`` -- identifier-rewrite artifact; restore before running.
    """
    snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(
        '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase )
    self.assertTrue(tokenizer.special_attribute_present )
    if is_tokenizers_available():
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
        # Test we can also load the slow version
        snake_case__ : List[str] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
    else:
        self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase__ ( self ) -> List[str]:
    """Loading a nonexistent model identifier must fail with a descriptive message."""
    expected_message = '''bert-base is not a local folder and is not a valid model identifier'''
    with self.assertRaisesRegex(lowerCamelCase , expected_message ):
        AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase__ ( self ) -> int:
    """Requesting a revision that does not exist must fail with a descriptive message."""
    expected_pattern = R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''
    with self.assertRaisesRegex(lowerCamelCase , expected_pattern ):
        AutoTokenizer.from_pretrained(lowerCamelCase , revision='''aaaaaa''' )
def lowercase__ ( self ) -> int:
    """Once a tokenizer is cached, re-loading it issues exactly one HEAD request
    (etag check) and no GET or other requests."""
    AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
    with RequestCounter() as counter:
        AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
    """Image processor that optionally resizes images down to the nearest multiple
    of ``size_divisor`` and rescales pixel values to [0, 1].

    Fixes over the original block, which was not runnable: the ``__init__``
    signature declared the same parameter name (``lowerCamelCase``) four times
    (a SyntaxError), all three methods shared the name ``lowercase__`` so the
    later definitions shadowed the earlier ones, and the bodies referenced names
    (``height``, ``new_h``, ``images`` ...) that were never assigned. Parameter
    and method names are restored to the conventional image-processor API
    (``resize`` / ``rescale`` / ``preprocess``).
    """

    # Keys produced by ``preprocess``.
    _lowerCAmelCase = ['pixel_values']

    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """Store the default transform switches, then delegate to the base class."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """Resize ``image`` so that height and width are multiples of ``size_divisor``."""
        height, width = get_image_size(image )
        # Round the height and width down to the closest multiple of size_divisor.
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        return resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , do_rescale = None , size_divisor = None , resample=None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """Apply the configured transforms to one image or a list of images and
        return a :class:`BatchFeature` holding ``pixel_values``.

        Raises ValueError when resizing is requested without a ``size_divisor``
        or when the inputs are not valid images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 694 | 1 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

# Fixture data for the demo in ``__main__`` (referenced below as test_data_*;
# the original block assigned both tuples to the same mangled name).
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """One cell of a singly linked list: a value and the next cell (or None)."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Immutable-after-construction singly linked list kept in ascending order.

    Fixes over the original block, which was not runnable: both classes were
    named ``_UpperCAmelCase`` while the code referenced ``Node`` and
    ``SortedLinkedList``, the dataclass declared the same field name twice, and
    ``sorted(..., reverse=<iterable>)`` passed the input as the reverse flag
    instead of ``True``.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list by prepending values in descending order, which
        leaves the chain sorted ascending from the head."""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Number of stored values (walks the whole list)."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render as 'a -> b -> c'."""
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted lists into a new SortedLinkedList containing all values."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 53 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = """▁"""
A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
A = {
"""google/pegasus-xsum""": 512,
}
class a__ ( __magic_name__ ):
    """Fast (Rust-backed) Pegasus tokenizer wrapper.

    NOTE(review): an automated identifier rewrite replaced most assignment
    targets with ``__UpperCAmelCase`` while the *uses* still reference the
    original names (``additional_special_tokens_extended``, ``self.offset``,
    ``self.can_save_slow_tokenizer`` ...), and all five class attributes below
    share the name ``lowercase_`` so only the last binding survives. Restore
    the original identifiers before shipping.
    """

    # Class-level configuration consumed by PreTrainedTokenizerFast
    # (vocab file names, pretrained maps, max sizes, slow class, input names).
    lowercase_ = VOCAB_FILES_NAMES
    lowercase_ = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ = PegasusTokenizer
    lowercase_ = ["input_ids", "attention_mask"]

    def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ):
        """Validate/extend the additional special tokens (padding the list with
        ``<unk_i>`` entries up to ``offset``) and initialise the fast tokenizer.

        NOTE(review): duplicate ``UpperCamelCase_`` parameter names make this a
        SyntaxError as written -- the defaults suggest (vocab_file,
        tokenizer_file, pad, eos, unk, mask_token, mask_token_sent,
        additional_special_tokens, offset).
        """
        __UpperCAmelCase : Optional[int] = offset
        if additional_special_tokens is not None:
            if not isinstance(UpperCamelCase_ , UpperCamelCase_):
                raise TypeError(
                    F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is"
                    F" {type(UpperCamelCase_)}")
            # Prepend mask_token_sent unless the caller already included it.
            __UpperCAmelCase : Any = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1)
            ]
            if len(set(UpperCamelCase_)) != len(UpperCamelCase_):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            __UpperCAmelCase : str = additional_special_tokens_extended
        else:
            __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)]
        super().__init__(
            UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
        __UpperCAmelCase : Optional[int] = vocab_file
        # Saving the slow vocabulary only works when a sentencepiece file exists.
        __UpperCAmelCase : List[str] = False if not self.vocab_file else True

    def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]):
        """Return a 0/1 list flagging which ids in ``seq`` are special tokens,
        after sanity-checking that the special-id set is exactly the expected
        contiguous range (3 core tokens + the additional ones)."""
        __UpperCAmelCase : int = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
        return [1 if x in all_special_ids else 0 for x in seq]

    def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False):
        """get_special_tokens_mask: delegate to the helper above, appending 1 for
        the EOS that build_inputs_with_special_tokens adds."""
        if already_has_special_tokens:
            return self._special_token_mask(UpperCamelCase_)
        elif token_ids_a is None:
            return self._special_token_mask(UpperCamelCase_) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a) + [1]

    def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None):
        """Build model inputs by appending EOS (pairs are simply concatenated)."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
        """Copy the sentencepiece vocab into ``save_directory``; returns a 1-tuple
        with the written path, or None if the directory is invalid."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(UpperCamelCase_):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        __UpperCAmelCase : List[str] = os.path.join(
            UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_):
            copyfile(self.vocab_file , UpperCamelCase_)
        return (out_vocab_file,)
| 77 | 0 |
'''simple docstring'''
from math import factorial
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : int ) ->int:
'''simple docstring'''
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
    # Demo: three classic n-choose-k examples (card hands, project groups,
    # race podium orderings).
    # NOTE(review): this calls ``combinations``; presumably that is the public
    # name of the factorial-based helper defined above -- verify it is exposed
    # under that name in this module.
    print(
        'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'''are {combinations(10, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
| 411 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowerCAmelCase :
    """Helper that builds tiny configs/inputs and shared assertions for the
    MaskFormer test suite.

    NOTE(review): identifiers were machine-rewritten -- locals are all
    ``_lowercase``, parameters are duplicated ``UpperCamelCase_`` names (a
    SyntaxError as written), and bodies still read the pre-rewrite names
    (``parent``, ``batch_size``, ...). Restore the originals before running.
    """

    def __init__( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Dict=10 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=32 * 4 , UpperCamelCase_ : int=32 * 6 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Tuple=32 , ) -> Union[str, Any]:
        """Record the test hyper-parameters (batch size, image min/max size,
        query/label counts, mask feature size)."""
        _lowercase : Union[str, Any] = parent
        _lowercase : List[str] = batch_size
        _lowercase : Union[str, Any] = is_training
        _lowercase : List[str] = use_auxiliary_loss
        _lowercase : Tuple = num_queries
        _lowercase : Union[str, Any] = num_channels
        _lowercase : Tuple = min_size
        _lowercase : Optional[Any] = max_size
        _lowercase : List[Any] = num_labels
        _lowercase : Tuple = mask_feature_size

    def __lowercase ( self : str ) -> int:
        """Create random (config, pixel_values, pixel_mask, mask_labels,
        class_labels) on the test device."""
        _lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            UpperCamelCase_ )
        _lowercase : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
        # Random binary masks (float) and labels (long) for each class.
        _lowercase : List[Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
        ).float()
        _lowercase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
        _lowercase : str = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def __lowercase ( self : Tuple ) -> Any:
        """Build a tiny MaskFormerConfig: shallow Swin backbone + small DETR decoder."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def __lowercase ( self : List[Any] ) -> Optional[int]:
        """Return (config, inputs_dict) in the format the common tests expect."""
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase : Tuple = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def __lowercase ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str ) -> Dict:
        """Check the lengths of the three hidden-state tuples against the config.

        NOTE(review): ``assertTrue`` is given two positional arguments where
        ``assertEqual`` was probably intended -- as written the second value is
        only used as the failure message, so these checks can never fail.
        """
        _lowercase : Optional[Any] = output.encoder_hidden_states
        _lowercase : str = output.pixel_decoder_hidden_states
        _lowercase : Tuple = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers )

    def __lowercase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=False ) -> List[Any]:
        """Forward MaskFormerModel (no grad) and verify output shapes, plus the
        hidden-state tuples when requested."""
        with torch.no_grad():
            _lowercase : Any = MaskFormerModel(config=UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.eval()
            _lowercase : Union[str, Any] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
            _lowercase : List[Any] = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the
        # correctness of the encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )

    def __lowercase ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : str ) -> Union[str, Any]:
        """Forward the instance-segmentation head with and without labels and
        verify mask/class logits shapes and the presence of a scalar loss."""
        _lowercase : Tuple = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()

        def comm_check_on_output(UpperCamelCase_ : Optional[Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _lowercase : List[Any] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
            _lowercase : str = model(UpperCamelCase_ )
            comm_check_on_output(UpperCamelCase_ )
            _lowercase : Dict = model(
                pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
            comm_check_on_output(UpperCamelCase_ )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowerCAmelCase ( __A , __A , unittest.TestCase ):
    """MaskFormer model tests: common mixin checks plus model-specific forward,
    loss, attention and gradient-flow tests.

    NOTE(review): identifiers were machine-rewritten; all class attributes are
    named ``snake_case_`` (so only the last binding survives) and bodies
    reference pre-rewrite names (``model_tester``, ``config_tester``, ...) that
    are assigned to ``_lowercase``. Restore the originals before running.
    """

    snake_case_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    snake_case_ = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Feature switches for the shared ModelTesterMixin checks (all disabled).
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def __lowercase ( self : List[str] ) -> Dict:
        """setUp: build the model tester and the ConfigTester (no text modality)."""
        _lowercase : str = MaskFormerModelTester(self )
        _lowercase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )

    def __lowercase ( self : Any ) -> Dict:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def __lowercase ( self : Any ) -> List[str]:
        """Forward the base model (with hidden states) and check output shapes."""
        _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )

    def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
        """Forward the instance-segmentation head and check logits and loss."""
        _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def __lowercase ( self : Dict ) -> Union[str, Any]:
        """Skipped: the model has no inputs_embeds."""
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def __lowercase ( self : Any ) -> Union[str, Any]:
        """Skipped: no get_input_embeddings method."""
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def __lowercase ( self : Dict ) -> List[str]:
        """Skipped: not a generative model."""
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def __lowercase ( self : List[str] ) -> List[Any]:
        """Skipped: no token embeddings to resize."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def __lowercase ( self : Any ) -> Optional[int]:
        """Skipped: incompatible with nn.DataParallel."""
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __lowercase ( self : List[Any] ) -> str:
        """Skipped pending a smaller common-test model."""
        pass

    def __lowercase ( self : Union[str, Any] ) -> Any:
        """The forward signature's first argument must be ``pixel_values``."""
        _lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase : Union[str, Any] = model_class(UpperCamelCase_ )
            _lowercase : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase : int = [*signature.parameters.keys()]
            _lowercase : Any = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCamelCase_ )

    @slow
    def __lowercase ( self : Optional[int] ) -> Any:
        """Pretrained checkpoints load without error."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _lowercase : Any = MaskFormerModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )

    def __lowercase ( self : str ) -> Optional[Any]:
        """A forward pass with random labels on a default config produces a loss."""
        _lowercase : int = (self.model_tester.min_size,) * 2
        _lowercase : Union[str, Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=UpperCamelCase_ ),
            '''class_labels''': torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(),
        }
        _lowercase : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ )
        _lowercase : Dict = model(**UpperCamelCase_ )
        self.assertTrue(outputs.loss is not None )

    def __lowercase ( self : Optional[Any] ) -> Tuple:
        """Forward the base model again with hidden states requested."""
        _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )

    def __lowercase ( self : Dict ) -> Optional[int]:
        """output_attentions=True yields attention tensors for every model class."""
        _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase : Any = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
            _lowercase : List[str] = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
            self.assertTrue(outputs.attentions is not None )

    def __lowercase ( self : Any ) -> List[str]:
        """Training mode: the segmentation head's loss backpropagates."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _lowercase : List[Any] = self.all_model_classes[1]
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        _lowercase : Optional[int] = model_class(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.train()
        _lowercase : Any = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
        loss.backward()

    def __lowercase ( self : Tuple ) -> int:
        """Gradients reach all intermediate activations (retain_grad checks on
        encoder, pixel-decoder and transformer-decoder hidden states, attentions)."""
        _lowercase : Dict = self.all_model_classes[1]
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        _lowercase : Optional[Any] = True
        _lowercase : Optional[int] = True
        _lowercase : Any = model_class(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.train()
        _lowercase : Optional[Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
        _lowercase : List[str] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _lowercase : int = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds; the original implementation doesn't
        _lowercase : int = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _lowercase : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=UpperCamelCase_ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4  # absolute tolerance used by the integration tests below


def _SCREAMING_SNAKE_CASE( ) ->Tuple:
    """Load the COCO fixture image used by the vision integration tests.

    Fix: the original assigned the opened image to a mangled local name and
    then returned the undefined name ``image`` (NameError at call time).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowercase ( self : str ) -> Union[str, Any]:
    """Image processor for the swin-small COCO checkpoint, or None when the
    vision extras are not installed."""
    if not is_vision_available():
        return None
    return MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
def __lowercase ( self : List[Any] ) -> Any:
    """Integration: the pretrained swin-small checkpoint's encoder,
    pixel-decoder and transformer-decoder last hidden states match recorded
    reference slices.

    NOTE(review): devices/tensors/tolerances all appear as the rewrite
    placeholder ``UpperCamelCase_``; restore the original names before running.
    """
    _lowercase : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(UpperCamelCase_ )
    _lowercase : Optional[Any] = self.default_image_processor
    _lowercase : Union[str, Any] = prepare_img()
    _lowercase : str = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
    _lowercase : Tuple = inputs['''pixel_values'''].shape
    # check size is divisible by 32
    self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
    # check size
    self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1_088) )
    with torch.no_grad():
        _lowercase : Dict = model(**UpperCamelCase_ )
    _lowercase : Any = torch.tensor(
        [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase_ )
    self.assertTrue(
        torch.allclose(
            outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
    _lowercase : List[str] = torch.tensor(
        [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase_ )
    self.assertTrue(
        torch.allclose(
            outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
    _lowercase : List[str] = torch.tensor(
        [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase_ )
    self.assertTrue(
        torch.allclose(
            outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def __lowercase ( self : List[Any] ) -> Optional[Any]:
    """Integration: the swin-small instance-segmentation head produces mask and
    class query logits that match recorded reference slices."""
    _lowercase : Any = (
        MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
        .to(UpperCamelCase_ )
        .eval()
    )
    _lowercase : Optional[int] = self.default_image_processor
    _lowercase : Union[str, Any] = prepare_img()
    _lowercase : Dict = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
    _lowercase : List[str] = inputs['''pixel_values'''].shape
    # check size is divisible by 32
    self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
    # check size
    self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1_088) )
    with torch.no_grad():
        _lowercase : Optional[Any] = model(**UpperCamelCase_ )
    # masks_queries_logits: spatial dims are compressed by a factor of 4
    _lowercase : Tuple = outputs.masks_queries_logits
    self.assertEqual(
        masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
    _lowercase : int = [
        [-1.3_737_124, -1.7_724_937, -1.9_364_233],
        [-1.5_977_281, -1.9_867_939, -2.1_523_695],
        [-1.5_795_398, -1.9_269_832, -2.093_942],
    ]
    _lowercase : List[str] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
    self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
    # class_queries_logits: num_labels + 1 for the null class
    _lowercase : Dict = outputs.class_queries_logits
    self.assertEqual(
        class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
    _lowercase : Tuple = torch.tensor(
        [
            [1.6_512e00, -5.2_572e00, -3.3_519e00],
            [3.6_169e-02, -5.9_025e00, -2.9_313e00],
            [1.0_766e-04, -7.7_630e00, -5.1_263e00],
        ] ).to(UpperCamelCase_ )
    self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def test_inference_instance_segmentation_head_resnet_backbone(self) -> None:
    """Integration test for the maskformer-resnet101-coco-stuff checkpoint.

    NOTE(review): assumes `torch_device` and `prepare_img` are provided by the
    test-harness imports at the top of the file -- verify.
    """
    model = (
        MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
        .to(torch_device)
        .eval()
    )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(image, return_tensors='''pt''').to(torch_device)
    inputs_shape = inputs['''pixel_values'''].shape
    # check size is divisible by 32
    self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
    # check size
    self.assertEqual(inputs_shape, (1, 3, 800, 1_088))
    with torch.no_grad():
        outputs = model(**inputs)
    # masks_queries_logits: one (H/4, W/4) mask per decoder query
    masks_queries_logits = outputs.masks_queries_logits
    self.assertEqual(
        masks_queries_logits.shape,
        (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
    )
    expected_slice = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
    expected_slice = torch.tensor(expected_slice).to(torch_device)
    self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    # class_queries_logits: per-query class scores (num_labels + 1 for "no object")
    class_queries_logits = outputs.class_queries_logits
    self.assertEqual(
        class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
    )
    expected_slice = torch.tensor(
        [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]]
    ).to(torch_device)
    self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))
def test_with_segmentation_maps_and_loss(self) -> None:
    """Integration test: a forward pass with segmentation maps yields a loss.

    NOTE(review): assumes `torch_device` and `np` are provided by the module
    imports at the top of the file -- verify.
    """
    model = (
        MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
        .to(torch_device)
        .eval()
    )
    image_processor = self.default_image_processor
    inputs = image_processor(
        [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
        segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
        return_tensors='''pt''',
    )
    # Move every tensor (and list of tensors) in the encoding to the device.
    inputs['''pixel_values'''] = inputs['''pixel_values'''].to(torch_device)
    inputs['''mask_labels'''] = [el.to(torch_device) for el in inputs['''mask_labels''']]
    inputs['''class_labels'''] = [el.to(torch_device) for el in inputs['''class_labels''']]
    with torch.no_grad():
        outputs = model(**inputs)
    # Supplying labels must produce a training loss.
    self.assertTrue(outputs.loss is not None)
| 411 | 1 |
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCAmelCase__(checkpoint_path, config_path, output_path):
    """Convert an original CompVis LDM checkpoint into a diffusers LDMPipeline.

    Args:
        checkpoint_path: path to the original torch checkpoint (``.ckpt``).
        config_path: path to the matching OmegaConf YAML config.
        output_path: directory where the converted pipeline is saved.

    The original code reused one parameter name for all three arguments (a
    SyntaxError) and overwrote a single variable instead of building the two
    sub-model state dicts; both are restored here.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE (strip the "first_stage_model." prefix)
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]

    # extract state_dict for UNetLDM (strip the "model.diffusion_model." prefix)
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
UpperCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 186 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
UpperCAmelCase__ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config URLs.
# NOTE(review): this rebinds the same name as the logger above, so the logger
# binding is lost -- verify the intended distinct names.
UpperCAmelCase__ = {
    '''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
    '''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class a__(PretrainedConfig):
    """Configuration class for XLM-RoBERTa-XL models.

    Defaults reproduce facebook/xlm-roberta-xl. The original definition
    reused one duplicated parameter name for every argument (a SyntaxError),
    assigned locals instead of instance attributes, and inherited from an
    undefined name; named parameters, ``self.*`` assignments and the imported
    ``PretrainedConfig`` base are restored.
    """

    A: Tuple = '''xlm-roberta-xl'''

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a__(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa-XL.

    The original inherited from an undefined name and read a variable that
    was never bound; the imported ``OnnxConfig`` base and the
    ``dynamic_axis`` local are restored.
    """

    @property
    def lowerCAmelCase(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes for the exported model inputs.

        Multiple-choice tasks carry an extra per-choice axis.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 186 | 1 |
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> int:
    """Iteratively sum the decimal digits of the argument.

    Negative inputs are handled via their absolute value. The original
    assigned its locals to a reused placeholder name and then read the
    undefined names ``n`` / ``res``; the working locals are restored.
    """
    n = abs(SCREAMING_SNAKE_CASE_)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> int:
    """Recursively sum the decimal digits of the argument.

    The original read an undefined ``n`` and recursed through the undefined
    name ``sum_of_digits``; the recursion now goes through a local helper so
    the function is self-contained (and robust against later rebinding of the
    module-level name).
    """

    def _digit_sum(n: int) -> int:
        # Base case: a single digit; otherwise peel off the last digit.
        return n if n < 10 else n % 10 + _digit_sum(n // 10)

    return _digit_sum(abs(SCREAMING_SNAKE_CASE_))
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> int:
    """Sum the decimal digits of the argument via its string form.

    The original applied ``int()`` to the whole argument instead of each
    character, yielding a wrong total; each digit is now converted
    individually.
    """
    return sum(int(digit) for digit in str(abs(SCREAMING_SNAKE_CASE_)))
def lowerCAmelCase__() -> None:
    """Benchmark the digit-sum implementations with timeit.

    NOTE(review): the loop references ``sum_of_digits``,
    ``sum_of_digits_recursion`` and ``sum_of_digits_compact``, which are not
    defined in this module (all implementations above share one name) --
    verify the intended function names. The inner helper's duplicated
    parameter names (a SyntaxError) and collapsed locals are restored.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Time one call expression evaluated against the __main__ namespace.
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='import __main__')
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 69 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    """Processor bundling a LayoutLMv3 image processor and tokenizer.

    The image processor (optionally) runs OCR over the document images; the
    tokenizer then encodes the resulting words and boxes together with the
    pixel values. The original definition inherited from an undefined name,
    duplicated parameter names (a SyntaxError) and gave every method the same
    name; the imported ``ProcessorMixin`` base and the method/attribute names
    the bodies themselves reference are restored.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accept image_processor/tokenizer; `feature_extractor` is a
        deprecated kwarg alias for image_processor."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor and then the tokenizer, returning a single
        encoding that also carries the pixel values.

        When the image processor runs OCR, callers must not pass their own
        boxes or word labels.
        """
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            # Duplicate images so each overflowed sequence keeps its source image.
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowed sequence it produced."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}''')
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Names of the tensors this processor produces.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning,
        )
        return self.image_processor
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for vector inputs and scalar outputs of the distance helpers.
# NOTE(review): both aliases are bound to the same name `_lowerCAmelCase`, so
# the first is immediately overwritten, and `np.floataa` is not a NumPy
# attribute (likely `np.float64`) -- verify the intended names.
_lowerCAmelCase : Union[str, Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_lowerCAmelCase : str = typing.Union[np.floataa, int, float] # noqa: UP007
def __snake_case(vector_a, vector_b):
    """Euclidean distance between two equal-length vectors, using NumPy.

    The original duplicated one parameter name for both arguments (a
    SyntaxError) and consequently subtracted the argument from itself,
    always returning 0; two distinct parameters are restored.
    """
    difference = np.asarray(vector_a) - np.asarray(vector_b)
    return np.sqrt(np.sum(difference ** 2))
def __snake_case(vector_a, vector_b):
    """Euclidean distance between two equal-length vectors, in pure Python.

    The original duplicated both the parameter names (a SyntaxError) and the
    loop targets (`for va, va in zip(...)`), which made every squared
    difference zero; distinct names are restored.
    """
    return sum((elem_a - elem_b) ** 2 for elem_a, elem_b in zip(vector_a, vector_b)) ** (1 / 2)
if __name__ == "__main__":
def __snake_case ( ) -> Dict:
'''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
benchmark()
| 289 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 590 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__(AbstractDatasetReader):
    """Load a JSON / JSON-Lines dataset through the packaged Json builder.

    The original duplicated one parameter name for every argument (a
    SyntaxError) and inherited from an undefined name; the named signature
    and the imported ``AbstractDatasetReader`` base are restored.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Optional top-level field holding the records inside each JSON file.
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style) for ``self.split``."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class a__:
    """Write a Dataset to JSON (lines or a pandas `orient`) via pandas.

    The original duplicated parameter names (a SyntaxError), collapsed all
    locals onto one placeholder, and named every method identically while the
    bodies call ``self._write`` / ``self._batch_json``; those names are
    restored.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the dataset and return the number of bytes written."""
        # `path_or_buf` is fixed by this writer; remove it from the kwargs.
        _ = self.to_json_kwargs.pop('''path_or_buf''', None)
        orient = self.to_json_kwargs.pop('''orient''', '''records''')
        lines = self.to_json_kwargs.pop('''lines''', True if orient == '''records''' else False)
        index = self.to_json_kwargs.pop('''index''', False if orient in ['''split''', '''table'''] else True)
        compression = self.to_json_kwargs.pop('''compression''', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, '''wb''', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ''' was passed. Please provide a local path instead.''')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        """Serialize one batch (given as a packed args tuple) to bytes."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('''\n'''):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write all batches to ``file_obj``, optionally with a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                    written += file_obj.write(json_str)
        return written
| 702 |
'''simple docstring'''
from PIL import Image
def __magic_name__(img, level):
    """Return ``img`` with its brightness shifted by ``level``.

    Each channel value ``c`` maps to ``128 + level + (c - 128)`` (i.e.
    ``c + level``). ``level`` must lie in [-255.0, 255.0]. The original
    duplicated one parameter name for both arguments (a SyntaxError) and
    passed a parameter instead of the inner mapping to ``img.point``.
    """

    def brightness(c):
        # Per-channel brightness shift.
        return 1_2_8 + level + (c - 1_2_8)

    if not -2_55.0 <= level <= 2_55.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
_UpperCAmelCase : str = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 474 | 0 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase__(base_url, params):
    """Fetch a Google Scholar lookup page and return the citation-count text.

    Args:
        base_url: the Scholar lookup endpoint URL.
        params: query parameters identifying the article.

    The original duplicated one parameter name for both arguments (a
    SyntaxError) and passed the function's own name to ``requests.get``.
    NOTE(review): relies on Scholar's current markup (gs_ri / gs_fl classes,
    third footer anchor = "Cited by N") -- verify.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    result_div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = result_div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
__magic_name__ = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 576 |
from __future__ import annotations
from math import pow, sqrt
def __lowercase(resistance, reactance, impedance):
    """Solve the series-AC relation Z^2 = R^2 + X^2 for the one zero argument.

    Exactly one of the three arguments must be 0; a dict mapping that
    quantity's name to its computed value is returned. The original
    duplicated one parameter name for all three arguments (a SyntaxError)
    while the body already read ``resistance`` / ``reactance`` /
    ``impedance``; the named parameters are restored.

    Raises:
        ValueError: if not exactly one argument is 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod() | 623 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Resolve the repository root and make `utils/` importable so that
# check_copies can be imported and tested directly.
# (The original bound both the repo path and the reference snippet below to
# the same name, clobbering the path; canonical names are restored and a
# backward-compatible alias is kept.)
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
    Output class for the scheduler\'s step function output.
    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
'''

# Backward-compatible alias for the original (reused) module-level name.
lowercase__ = REFERENCE_CODE
class UpperCAmelCase_(unittest.TestCase):
    """Tests for utils/check_copies.py, run against a temporary tree.

    The original gave every method the same name with duplicated parameter
    names (a SyntaxError) while the bodies call ``check_copy_consistency``;
    the method names are restored from those references.
    """

    def setUp(self):
        # Work in a throwaway schedulers folder and point check_copies at it.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        # NOTE(review): `git_repo_path` must be the module-level repo root
        # (the original clobbered it by reusing one name) -- verify.
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Restore the real path and drop the temporary tree.
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` to a file and run ``is_copy_consistent`` on it.

        When `overwrite_result` is given, the checker runs with
        ``overwrite=True`` and the rewritten file must match it.
        """
        code = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # NOTE(review): the original target version token was garbled
        # ("PYaa"); PY37 matches the upstream test -- verify.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=1_19)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        # NOTE(review): REFERENCE_CODE is the module-level reference snippet
        # (named `lowercase__` in the original) -- verify.
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''', f'''{long_class_name}SchedulerOutput''', re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
'''simple docstring'''
from __future__ import annotations
from collections import deque
class UpperCAmelCase_:
    """Aho-Corasick automaton for multi-keyword string matching.

    States live in ``self.adlist``: each entry holds its character ``value``,
    child state indices, a BFS-computed ``fail_state`` and the list of
    keywords ``output`` ending at that state. The original gave all five
    methods one duplicated name with duplicated parameter names (a
    SyntaxError) while the bodies call ``add_keyword`` /
    ``set_fail_transitions`` / ``find_next_state``; those names are restored.
    """

    def __init__(self, keywords):
        self.adlist = []
        # State 0 is the trie root.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char):
        """Return the child of ``current_state`` labeled ``char``, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword):
        """Insert ``keyword`` into the trie, creating states as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self):
        """BFS over the trie computing failure links and merged outputs."""
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 states always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk fail links until a state with a matching child (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match here also implies every match of the fail state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string):
        """Return {keyword: [start indices]} for all occurrences in ``string``."""
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 420 | 0 |
def a_(SCREAMING_SNAKE_CASE__: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell its
    binary representation (e.g. "AC" -> 10101100).

    Leading/trailing whitespace and a leading '-' are accepted. The original
    assigned its locals to a reused placeholder and then read the undefined
    name ``hex_num``; it also crashed on input "0" (int("")), now handled.

    Raises:
        ValueError: for empty input or a non-hexadecimal string.
    """
    hex_num = SCREAMING_SNAKE_CASE__.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')
    if int_num == 0:
        # Zero has no '1' bits; return it directly instead of int('').
        return 0
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 464 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A(unittest.TestCase):
    """Unit tests for the generation stopping-criteria classes.

    The original gave every method one name and called the undefined
    ``self._get_tensors``; the helper and ``test_*`` method names are
    restored, and the placeholder standing in for ``torch_device`` /
    ``UserWarning`` is resolved.
    """

    def _get_tensors(self, length):
        """Build an (input_ids, scores) pair with the given sequence length."""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        # A list of criteria stops as soon as any member stops.
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        # The list exposes the combined max_length (start_length + max_new_tokens).
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        # Backdating the start timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        # A mismatched max_length triggers a user warning.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 464 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : Optional[Any] = logging.get_logger(__name__)
class _UpperCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone (``maskformer-swin``).

    Fixed: the base classes were the undefined name ``_a`` (the imports on this
    file provide ``BackboneConfigMixin`` and ``PretrainedConfig``); every
    ``__init__`` parameter shared the name ``_lowercase`` (a SyntaxError); the
    body read the undefined ``snake_case_``; and both class attributes were
    assigned to ``a__`` so the second clobbered the first.
    """

    model_type = "maskformer-swin"

    # PretrainedConfig attribute aliases used by downstream code.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stand-in so module-level ``Image`` references resolve without PIL.

        Fixed: ``def a(*_lowercase, **_lowercase)`` repeated one parameter name
        (a SyntaxError) and the stub did not shadow the ``Image`` name it exists
        to replace.
        """

        @staticmethod
        def open(*args, **kwargs):
            # No-op: vision-dependent tests are gated behind @require_vision anyway.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """Pipeline tests for the ``object-detection`` task.

    Fixed: results were assigned to the throwaway name ``__UpperCAmelCase`` while
    later lines read the real names (NameError); several signatures repeated one
    parameter name (SyntaxError); and every method was named ``a``, so each
    definition shadowed the previous one and unittest discovered no tests.
    """

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus one example image path for the shared test runner."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Check the output schema on a single image and on a heterogeneous batch."""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 397 | 0 |
"""Lazy import structure for the HerBERT tokenizers."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


# Fixed: the import structure was bound to a throwaway name and later clobbered,
# and the _LazyModule instance was never installed in sys.modules.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fast tokenizer is optional; expose it only when `tokenizers` is installed.
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCamelCase(unittest.TestCase):
    """Download-format checks for Flax diffusion pipelines."""

    def test_download_only_flax(self):
        """Downloading a Flax pipeline must not pull any PyTorch ``.bin`` weights.

        Fixed: the temp-dir path was the undefined name ``_A`` and the walked
        file lists were bound to throwaway names.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class _lowerCamelCase(unittest.TestCase):
    """Slow end-to-end tests for FlaxStableDiffusionPipeline.

    Fixed: annotated tuple-unpacks (``a , b : T = ...``) are SyntaxErrors, every
    value was bound to the throwaway ``__magic_name__`` while later lines read
    real names, ``_A`` stood in for actual arguments, and obfuscated dtypes/
    numbers (``np.floataa``/``jnp.bfloataa``/``4.151_4745``) are restored.
    """

    def test_stable_diffusion_flax_tiny(self):
        """Shape and (on a TPU pod) pixel-sum sanity check with the tiny test pipe."""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_no_eff = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_no_eff).max() < 1e-2
from ...processing_utils import ProcessorMixin
class _lowercase(ProcessorMixin):
    """Bundles a Whisper feature extractor and tokenizer into a single processor.

    Fixed: the base class was the undefined name ``__a`` (should be the imported
    ``ProcessorMixin``); both required class attributes shared the name
    ``_lowercase`` so the second clobbered the first; arguments were forwarded
    via the undefined placeholder ``A__``; and the merged ``labels`` entry was
    assigned to the throwaway name ``A_`` instead of ``inputs["labels"]``.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__ when used as a plain callable.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's decoder-prompt helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Dispatch ``audio`` to the feature extractor and/or ``text`` to the tokenizer.

        When both are given, the tokenized ids are attached as ``labels`` on the
        audio features.
        """
        # For backwards compatibility with the target-processor context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Forward to the tokenizer's ``get_prompt_ids``."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 700 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# Fixed: all five module constants were assigned to the single name
# ``__lowercase`` (each clobbering the previous), while the tokenizer class
# below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, etc.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _lowercase(PreTrainedTokenizerFast):
    """Fast RetriBERT tokenizer (BERT-style WordPiece, backed by `tokenizers`).

    Fixed: the base class was the undefined ``__lowerCamelCase`` (should be the
    imported ``PreTrainedTokenizerFast``); every ``__init__`` parameter shared
    the name ``lowerCamelCase__`` (a SyntaxError); class attributes were all
    named ``_lowercase``; and normalizer/attribute updates were assigned to the
    throwaway name ``A_`` instead of their real targets.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its state disagrees with the options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (B [SEP]) — add BERT-style special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (and specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Persist the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 563 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = (DPMSolverSinglestepScheduler,)
lowerCamelCase__ = (('''num_inference_steps''', 25),)
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.dummy_sample
snake_case__ : Dict = 0.1 * sample
snake_case__ : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case__ : Dict = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ , snake_case__ : Dict = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
snake_case__ : List[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Optional[int] = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # NOTE(review): deliberate no-op override — presumably disables a check from
    # the SchedulerCommonTest base class that does not apply to this scheduler;
    # confirm the intended method name against the base class.
    def __UpperCamelCase ( self ):
        pass
def check_over_forward(self, time_step=0, **forward_kwargs):
    """Save/reload round-trip check driven by forward kwargs (scheduler config stays default).

    NOTE(review): identifiers restored from the upstream diffusers scheduler test; the
    original signature had duplicate parameter names and every local was clobbered.
    """
    kwargs = dict(self.forward_default_kwargs)
    num_inference_steps = kwargs.pop("num_inference_steps", None)
    sample = self.dummy_sample
    residual = 0.1 * sample
    dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        # copy over dummy past residuals (must be after setting timesteps)
        scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(tmpdirname)
            new_scheduler = scheduler_class.from_pretrained(tmpdirname)
            new_scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
        output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
        new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
    """Run a full 10-step denoising loop and return the final sample.

    A caller may pass an already-constructed scheduler; otherwise one is built from
    the default class with the given config overrides.
    Fix: the original unconditionally rebuilt the scheduler after the `is None` guard,
    silently discarding any scheduler the caller passed in (defeating test_switch).
    """
    if scheduler is None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
    num_inference_steps = 10
    model = self.dummy_model()
    sample = self.dummy_sample_deter
    scheduler.set_timesteps(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample
def test_full_uneven_loop(self):
    """Run 50 steps but start at an offset so the first timestep index is uneven."""
    scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
    num_inference_steps = 50
    model = self.dummy_model()
    sample = self.dummy_sample_deter
    scheduler.set_timesteps(num_inference_steps)
    # make sure that the first t is uneven
    for i, t in enumerate(scheduler.timesteps[3:]):
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.2574) < 1e-3
def test_timesteps(self):
    """Sweep num_train_timesteps through the config round-trip check."""
    # Fix: the loop variable was never forwarded (a garbled placeholder was passed instead).
    for timesteps in [25, 50, 100, 999, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps)
def test_switch(self):
    """Cycling a config through sibling scheduler classes must not change results."""
    # make sure that iterating over schedulers with same config names gives same results
    # for defaults
    scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
    sample = self.full_loop(scheduler=scheduler)
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.2791) < 1e-3
    # round-trip the config through the other multistep schedulers and back
    scheduler = DEISMultistepScheduler.from_config(scheduler.config)
    scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
    scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
    scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
    sample = self.full_loop(scheduler=scheduler)
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.2791) < 1e-3
def test_thresholding(self):
    """Exercise dynamic thresholding across orders, solver types, and prediction types."""
    self.check_over_configs(thresholding=False)
    for order in [1, 2, 3]:
        for solver_type in ["midpoint", "heun"]:
            for threshold in [0.5, 1.0, 2.0]:
                for prediction_type in ["epsilon", "sample"]:
                    # NOTE(review): keyword bindings restored from the upstream
                    # diffusers test (the originals were garbled placeholders).
                    self.check_over_configs(
                        thresholding=True,
                        prediction_type=prediction_type,
                        sample_max_value=threshold,
                        algorithm_type="dpmsolver++",
                        solver_order=order,
                        solver_type=solver_type,
                    )
def test_prediction_type(self):
    """Sweep the supported prediction types through the config round-trip check."""
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
    """Cross-product of algorithm, solver type, order, and prediction type; samples must stay finite."""
    for algorithm_type in ["dpmsolver", "dpmsolver++"]:
        for solver_type in ["midpoint", "heun"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                        algorithm_type=algorithm_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                        algorithm_type=algorithm_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
def test_lower_order_final(self):
    """Both settings of lower_order_final must survive a config round-trip."""
    # Fix: both calls passed the same garbled placeholder; they should cover True and False.
    self.check_over_configs(lower_order_final=True)
    self.check_over_configs(lower_order_final=False)
def test_lambda_min_clipped(self):
    """Both the unclipped (-inf) and a finite lambda_min_clipped must round-trip."""
    self.check_over_configs(lambda_min_clipped=-float("inf"))
    self.check_over_configs(lambda_min_clipped=-5.1)
def test_variance_type(self):
    """Both variance_type settings must round-trip through the config check."""
    # Fix: the first call passed a garbled placeholder; upstream passes None here.
    self.check_over_configs(variance_type=None)
    self.check_over_configs(variance_type="learned_range")
def test_inference_steps(self):
    """Sweep num_inference_steps through the forward-kwargs round-trip check."""
    for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
        self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
    """Default full loop: mean absolute value of the result is pinned to a known constant."""
    sample = self.full_loop()
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.2791) < 1e-3
def test_full_loop_with_karras(self):
    """Full loop with Karras sigmas enabled; result mean pinned to a known constant."""
    sample = self.full_loop(use_karras_sigmas=True)
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.2248) < 1e-3
def test_full_loop_with_v_prediction(self):
    """Full loop under v-prediction; result mean pinned to a known constant."""
    sample = self.full_loop(prediction_type="v_prediction")
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.1453) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
    """Full loop combining v-prediction with Karras sigmas."""
    sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.0649) < 1e-3
def test_fp16_support(self):
    """A half-precision sample must stay float16 through a full loop with thresholding on."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
    scheduler = scheduler_class(**scheduler_config)
    num_inference_steps = 10
    model = self.dummy_model()
    sample = self.dummy_sample_deter.half()
    scheduler.set_timesteps(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    # Fix: `torch.floataa` does not exist; the .half() input makes float16 the intent.
    assert sample.dtype == torch.float16
| 38 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k +/- 1 trial division.

    Raises AssertionError for non-int or negative input; the unittest class in this
    file relies on both the name ``is_prime`` and that assertion behaviour, but the
    function had been renamed to a garbled placeholder, leaving those calls dangling.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class a_(unittest.TestCase):
    """Unit tests for is_prime (defined above in this file)."""

    def test_primes(self):
        """A sample of known primes must be accepted."""
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        """Non-primes are rejected; negative input raises via the function's assert."""
        # Fix: the original passed an undefined (name-mangled) identifier to
        # assertRaises; is_prime asserts on negatives, so AssertionError is expected.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
    # Fix: dataset residue ("| 555 | 0 |") was fused onto this line, breaking the syntax.
    unittest.main()
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate a CUDA OOM by raising the RuntimeError accelerate's retry logic matches on.

    Fix: this helper was defined under a garbled name while the test class below calls
    ``raise_fake_out_of_memory()`` five times; the name is restored to match its callers.
    """
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear model used only to allocate CUDA memory in tests.

    Fixes: the layers were bound to throwaway locals (never became attributes),
    ``nn.BatchNormad`` is not a real module (BatchNorm1d intended, normalizing the
    4-feature output of the first linear), and the forward method had lost its name.
    The test below instantiates this class as ``ModelForTest()``.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class UpperCAmelCase__(unittest.TestCase):
    """Tests for accelerate's find_executable_batch_size decorator and release_memory.

    Fixes: all seven methods previously shared one garbled name, so Python kept only the
    last definition and unittest discovered none of them; locals were clobbered and
    assertRaises was given undefined identifiers. Names restored from the upstream
    accelerate test suite (NOTE(review): confirm exception classes against it).
    """

    def test_base_case(self):
        """Batch size halves on each fake OOM until the function succeeds at 8."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_arguments(self):
        """Extra positional arguments are forwarded and the return value is preserved."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        """Starting at batch size 0 fails immediately with an explanatory error."""
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        """If every batch size OOMs, the search bottoms out at zero and raises."""
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        """Passing the batch size explicitly is rejected with a helpful message."""
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        """Non-OOM exceptions must propagate unchanged instead of triggering a retry."""
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        """release_memory must return CUDA allocation to its pre-model level."""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome in O(n) time / O(1) space.

    Splits the list at its midpoint, reverses the second half in place, then walks
    both halves comparing values. The input list is mutated (split and reversed).
    Fix: every local had been collapsed to one garbled name, so fast/slow/second/node
    were undefined; identifiers restored from the well-known fast/slow-pointer version.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_traversal(head):
    """Palindrome check using a stack of the second half's values (O(n) space).

    Fix: tuple unpacks had collapsed every local to one garbled name; restored
    fast/slow/cur/stack from the standard stack-based implementation.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison: pop from the back while walking from the front
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    """Palindrome check via a value -> positions dict: each value's position list must
    pair up symmetrically around the middle (positions i and -1-i sum to len-1).

    At most one value may appear an odd number of times (the middle element).
    Fix: locals restored (d/pos/checksum/middle/step) and ``len(A_)`` — the parameter —
    corrected to ``len(v)``, the per-value position list.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 286 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for VideoToVideoSDPipeline built from tiny dummy components.

    Fixes: all class attributes and all methods previously shared single garbled names
    (so only the last of each survived), the mixin base was a placeholder, and every
    local in the method bodies was clobbered. Names restored from the upstream
    diffusers test module; the self-calls (get_dummy_components/get_dummy_inputs)
    in the bodies confirm the method names.
    """

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''}) - {'''image''', '''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''}) - {'''image'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    # NOTE(review): upstream disables attention-slicing tests here — confirm.
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ])

    def get_dummy_components(self):
        """Build the smallest possible UNet/scheduler/VAE/text-encoder component set."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""), up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="""gelu""", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt + 3-frame dummy video inputs for the pipeline call."""
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("""mps"""):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """video""": video,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs

    def test_text_to_video_default_case(self):
        """End-to-end dummy run; the output slice is pinned to known values."""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against the public zeroscope_v2_XL checkpoint.

    Fixes: class name restored (it previously collided with the fast-test class),
    locals de-garbled, and the non-existent ``torch.floataa`` dtype corrected to
    ``torch.float16`` (half precision is the intent for this XL checkpoint).
    """

    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        video = torch.randn((1, 10, 3, 1_024, 576), generator=generator)
        video = video.to("""cuda""")
        prompt = """Spiderman is surfing"""
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="""pt""").frames
        expected_array = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping original DiT checkpoint names to HF BeiT names.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: include mask token + final layernorm (masked-image-modeling head)
            instead of pooler layernorm + classifier.
        is_semantic: original keys live under a ``backbone.`` prefix.

    Fixes: the original signature repeated one parameter name three times (a
    SyntaxError) and bound ``prefix``/``rename_keys`` to throwaway names, leaving
    them undefined; the in-file caller uses this name and the ``has_lm_head`` keyword.
    """
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
            (f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
            (f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
            (f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
        ])
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("""mask_token""", """beit.embeddings.mask_token"""),
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
                ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate HF query/key/value entries,
    and move the gamma scaling parameters to their HF ``lambda_`` names, in place.

    NOTE(review): destination key names restored from the upstream transformers DiT
    conversion script — the original bound every value to one throwaway name, so the
    dict writes were lost; confirm against that script.
    """
    for i in range(config.num_hidden_layers):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values are stored fused as a single qkv matrix
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        # note: the original checkpoint has no key bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_b = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_a
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_b
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if ``old`` is absent).

    Fix: the original declared all three parameters with the same name (a SyntaxError);
    the in-file caller invokes this as ``rename_key(state_dict, src, dest)``.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO sanity-check image (000000039769) as a PIL image.

    Fix: the URL was bound to a throwaway name and an undefined identifier was
    passed to ``requests.get``; name restored to match the in-file caller.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint (.pth URL) into a Hugging Face BeiT model,
    verify its logits shape on a sample image, save it, and optionally push to the Hub.

    Fixes: the original bound every local to throwaway names (config, state_dict,
    model, etc. were all undefined at their use sites) and passed placeholders to
    the helpers; identifiers restored from the upstream transformers conversion script.
    """
    # rvlcdip checkpoints are classification fine-tunes; everything else keeps the LM head
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True, )
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to throwaway names, so
    # `parser.add_argument` and `args.*` below referenced undefined identifiers.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Helper that holds BridgeTower image-processor settings and computes the
    height/width the processor is expected to produce after shortest-edge resizing.

    Fixes: the class name is restored to match its in-file instantiation
    (``BridgeTowerImageProcessingTester(self)``), the ``__init__`` signature no longer
    repeats one parameter name fifteen times (a SyntaxError), attributes are actually
    stored on ``self``, and the collapsed tuple unpacks in ``get_expected_values``
    are restored (NOTE(review): from the upstream transformers test — confirm).
    """

    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
        image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=4_00,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 2_88}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should emit.

        Non-batched: scale the shorter side to ``size``, cap the longer side at
        1333/800 * size, then round each side down to a multiple of size_divisor.
        Batched: per-image values, then the max height and max width over the batch.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((13_33 / 8_00) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Tests for ``BridgeTowerImageProcessor``.

    The obfuscated version named every method ``a__`` — so later defs shadowed
    earlier ones and unittest discovered none of them — and stored the
    processor class in ``__A`` while the bodies read
    ``self.image_processing_class``. Real names are restored below.
    """

    # Attribute name must match what the method bodies read.
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        # Intentionally empty: batch-feature behavior is covered elsewhere.
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 707 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import machinery (standard transformers pattern): build an
# _import_structure mapping and hand it to _LazyModule. The obfuscated
# version bound everything to `__lowerCamelCase` (the list clobbered the
# dict) and then referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling objects too.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 328 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__(snake_case_) -> float:
    """Return the equivalent resistance of resistors connected in parallel.

    :param snake_case_: iterable of resistor values in ohms; all must be > 0.
    :raises ValueError: if any resistor is zero or negative.

    The obfuscated body iterated an undefined ``resistors``, accumulated into
    an undefined ``first_sum``, and divided by the whole input instead of the
    current resistor; all three are fixed here.
    """
    first_sum = 0.00
    index = 0
    for resistor in snake_case_:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        # Parallel combination: 1 / R_total = sum(1 / R_i)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def SCREAMING_SNAKE_CASE__(snake_case_) -> float:
    """Return the equivalent resistance of resistors connected in series.

    :param snake_case_: iterable of resistor values in ohms; none may be < 0.
    :raises ValueError: if any resistor is negative.

    Restores the local names (``sum_r``, ``index``) that the obfuscation
    collapsed into write-only placeholders, leaving the originals undefined.
    The negative check deliberately stays AFTER the addition, matching the
    original statement order.
    """
    sum_r = 0.00
    index = 0
    for resistor in snake_case_:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
# Run the doctests embedded in the resistor helpers when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 416 |
from typing import Any
class Node:
    """A single element of a singly linked list.

    Renamed from the obfuscated ``a``: the list methods and the test code
    construct ``Node(...)`` by name.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # pointer to the next node, None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexed access, insertion, deletion and
    in-place reversal.

    Renamed from the obfuscated ``a`` (which also shadowed the Node class);
    all methods were named ``lowerCAmelCase_`` and shadowed each other, while
    the test code calls them by their real names — those names are restored,
    along with the local variables the obfuscation collapsed away.
    """

    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        # Yield the *data* of each node, head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the LinkedList API end to end.

    Renamed from the duplicated obfuscated name, and the stray
    ``snake_case_`` references (which pointed at nothing) are replaced with
    the actual list / loop variables.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads (ints, floats, strings,
    Node instances, None). Renamed from the duplicated obfuscated name; the
    ``snake_case_`` placeholders are replaced with the real locals.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        '''dlrow olleH''',
        7,
        5555,
        0,
        -192.55555,
        '''Hello, world!''',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('''Hello again, world!'''))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """Interactive demo of LinkedList; also runs the module doctests.

    Renamed to ``main`` because the ``__main__`` guard below calls ``main()``,
    which did not exist under the obfuscated duplicated name. The stray
    ``snake_case_`` references are replaced with ``linked_list``.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('''Inserting 1st at head ''').strip())
    linked_list.insert_head(input('''Inserting 2nd at head ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    linked_list.insert_tail(input('''\nInserting 1st at tail ''').strip())
    linked_list.insert_tail(input('''Inserting 2nd at tail ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nDelete head''')
    linked_list.delete_head()
    print('''Delete tail''')
    linked_list.delete_tail()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nReverse linked list''')
    linked_list.reverse()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nString representation of linked list:''')
    print(linked_list)
    print('''\nReading/changing Node data using indexing:''')
    print(f'Element at Position 1: {linked_list[1]}')
    # Write through __setitem__ at index 1, then show the updated list.
    linked_list[1] = input('''Enter New Value: ''').strip()
    print('''New list:''')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
# Launch the interactive demo (which also runs doctests) when executed
# directly as a script.
if __name__ == "__main__":
    main()
| 416 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import machinery (standard transformers pattern). The obfuscated
# version bound every structure to `_lowerCamelCase` — each assignment
# clobbered the previous one — and then passed an undefined
# `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # vision deps present: expose the (feature-extractor / image-processor) API.
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch present: expose modeling classes for MaskFormer and its Swin backbone.
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 447 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE(DiffusionPipeline):
    """Audio diffusion pipeline: denoises mel-spectrogram images with a UNet
    scheduler pair (optionally in the latent space of a VQ-VAE) and converts
    them back to audio via the ``mel`` helper.

    Fixes vs. the obfuscated version: the base class was the undefined name
    ``UpperCamelCase`` (DiffusionPipeline is the imported base); every local
    was bound to ``__SCREAMING_SNAKE_CASE`` leaving the names the code reads
    (``steps``, ``mask_start``, ``prev_timestep``, ...) undefined; the three
    ``__snake_case`` methods shadowed each other and are given their real
    names; and the duplicate ``UpperCamelCase`` parameters (a SyntaxError)
    are replaced with real parameter names.
    """

    # Registered modules DiffusionPipeline may leave as None.
    # (Renamed from ``lowerCAmelCase``; the base class reads
    # ``_optional_components``.)
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default inference steps: DDIM converges in far fewer than DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Generate audio, optionally conditioned on existing audio.

        When ``audio_file``/``raw_audio`` is given, the selected slice is
        converted to a spectrogram image and used for (partial) denoising
        from ``start_step`` and for masking the first/last
        ``mask_start_secs``/``mask_end_secs`` seconds.
        Returns a BaseOutput combining audio and image outputs, or a plain
        tuple when ``return_dict`` is False.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # Map uint8 pixels to [-1, 1] as the diffusion model expects.
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            # The conditional UNet variant additionally takes the encoding.
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # Clamp the masked regions back to the (noised) input audio.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse-DDIM encode spectrogram images back into noise."""
        # Only works with DDIM (deterministic, invertible) scheduling.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(xa: torch.Tensor, xa_: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(
            torch.dot(torch.flatten(xa), torch.flatten(xa_)) / torch.norm(xa) / torch.norm(xa_)
        )
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xa_ / sin(theta)
| 447 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery (standard transformers pattern). The obfuscated
# version bound each structure to `_lowercase` — the optional lists clobbered
# the dict — and then passed an undefined `_import_structure` to _LazyModule.
_import_structure = {
    'configuration_blenderbot_small': [
        'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotSmallConfig',
        'BlenderbotSmallOnnxConfig',
    ],
    'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blenderbot_small'] = [
        'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotSmallForCausalLM',
        'BlenderbotSmallForConditionalGeneration',
        'BlenderbotSmallModel',
        'BlenderbotSmallPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
        'TFBlenderbotSmallForConditionalGeneration',
        'TFBlenderbotSmallModel',
        'TFBlenderbotSmallPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
        'FlaxBlenderbotSmallForConditionalGeneration',
        'FlaxBlenderbotSmallModel',
        'FlaxBlenderbotSmallPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCamelCase__ = 1.0_54_57_18_17E-34 # unit of ℏ : J * s
lowerCamelCase__ = 3E8 # unit of c : m * s^-1
def _lowerCamelCase( __snake_case , __snake_case , __snake_case ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
__snake_case = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__snake_case = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__snake_case = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest — executes the examples embedded in this module's docstrings
# when the file is run directly as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 524 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
a__ : List[str] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
a__ : str = parser.parse_args()
a__ : Tuple = """cpu"""
a__ : Tuple = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
a__ : Tuple = """path-to-your-trained-model"""
a__ : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
a__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
a__ : int = pipe.to(device)
# to channels last
a__ : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
a__ : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last)
a__ : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
a__ : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
a__ : Any = torch.randn(2, 4, 6_4, 6_4)
a__ : Optional[int] = torch.rand(1) * 9_9_9
a__ : Dict = torch.randn(2, 7_7, 7_6_8)
a__ : str = (sample, timestep, encoder_hidden_status)
try:
a__ : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
a__ : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
a__ : List[str] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
a__ : Any = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
a__ : Optional[int] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
a__ : List[Any] = 6_6_6
a__ : List[Any] = torch.Generator(device).manual_seed(seed)
a__ : Optional[Any] = {"""generator""": generator}
if args.steps is not None:
a__ : List[Any] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
a__ : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 718 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Image-to-tensor transform used by preprocess(); the obfuscated version
# bound it to `a__` while the function called the undefined name `trans`.
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def snake_case(image):
    """Normalize the input for the pipeline: tensors pass through unchanged;
    a single PIL image (or list of them) is resized to 256x256, scaled to
    [-1, 1], and stacked into a batch tensor.

    The parameter is renamed so the body no longer mixes an obfuscated alias
    with undefined locals, and the isinstance check uses the imported
    ``Image`` (the file imports ``from PIL import Image``, so the original
    ``PIL.Image.Image`` reference was undefined).
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, Image.Image):
        image = [image]

    image = [trans(img.convert("""RGB""")) for img in image]
    image = torch.stack(image)
    return image


# Alias matching the name the pipeline below actually calls.
preprocess = snake_case
class lowercase(DiffusionPipeline):
    """DDIM image-to-image pipeline: noises an input image to an intermediate
    timestep determined by ``strength`` and denoises it back.

    Fixes vs. the obfuscated version: the base class was the undefined name
    ``UpperCAmelCase_`` (DiffusionPipeline is the imported base); the three
    helper methods were all named ``_UpperCamelCase`` (shadowing each other)
    while ``__call__`` invokes them as ``check_inputs`` / ``get_timesteps`` /
    ``prepare_latents``; and the collapsed local assignments are restored.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that strength lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the timestep schedule implied by ``strength``."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the image to the target device/dtype and noise it to ``timestep``."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')

        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("""add noise to latents at timestep""", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run img2img generation; returns ImagePipelineOutput, or
        (images, latent_timestep) when ``return_dict`` is False."""
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
| 235 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCAmelCase_ : Tuple = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase_ : Union[str, Any] = '''UperNetConfig'''
class UperNetConvModule(nn.Module):
    """Conv2d + BatchNorm2d + ReLU block used throughout the UperNet heads.

    NOTE(review): restored from a mangled original that used the non-existent
    `nn.Convad`/`nn.BatchNormad` names and dropped the `self.*` assignments;
    the class name matches how the rest of this file already refers to it.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1) -> None:
        super().__init__()
        # Bias defaults to False because the following batch norm has its own shift.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input) -> torch.Tensor:
        """Apply conv -> batch norm -> ReLU."""
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One pyramid-pooling branch: adaptive average pool to ``pool_scale``,
    then a 1x1 conv block projecting to ``channels``.

    NOTE(review): the original dropped the `self.layers` assignment and used
    the non-existent `nn.AdaptiveAvgPoolad`; both restored here.
    """

    def __init__(self, pool_scale, in_channels, channels) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer under a numeric name so parameters are tracked.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSP): pools the top feature map at several
    scales and upsamples each pooled map back to the input resolution.

    NOTE(review): the original dropped the `self.blocks` assignment and the
    forward-pass locals; restored here.
    """

    def __init__(self, pool_scales, in_channels, channels, align_corners) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x) -> "list[torch.Tensor]":
        """Return one upsampled feature map per pooling scale."""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """UperNet decode head: PSP over the top backbone feature plus an FPN
    over the lower features, ending in a 1x1 classifier producing logits.

    NOTE(review): reconstructed — the original lost the `self.lateral_convs`,
    `self.fpn_convs`, etc. assignments and all forward-pass locals.
    """

    def __init__(self, config, in_channels) -> None:
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Normal init for convolutions, zero bias.
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """Pyramid-pool the top feature map and fuse through the bottleneck."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Auxiliary FCN head applied to one intermediate backbone feature map
    (selected by ``in_index``), used only to compute an auxiliary loss.

    NOTE(review): reconstructed — the original lost the `self.convs` /
    `self.conv_cat` / `self.classifier` assignments.
    """

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """Base class hooking UperNet into the HF weight-init machinery.

    NOTE(review): the original collapsed the three class attributes onto one
    mangled name and inherited from an undefined symbol; `PreTrainedModel`
    (imported at the top of this file) is the evident intended base.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Delegate initialization to each submodule's own init_weights.
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the backbone, decode head and auxiliary head weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
# NOTE(review): the original bound both docstrings to the same mangled name,
# leaving `UPERNET_START_DOCSTRING` / `UPERNET_INPUTS_DOCSTRING` (referenced
# by the decorators below) undefined; the conventional names are restored.
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(PreTrainedModel):
    """UperNet semantic-segmentation model: a backbone, a UperNet decode head
    and an optional auxiliary FCN head.

    NOTE(review): reconstructed from a mangled original; upstream this class
    subclasses `UperNetPreTrainedModel` — confirm the intended base.
    """

    def __init__(self, config) -> None:
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """Compute per-pixel logits (and the loss when ``labels`` is given).

        Returns a `SemanticSegmenterOutput` (or a tuple when ``return_dict``
        is False).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        # Upsample logits back to the input image resolution.
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 24 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : int = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __snake_case ( PretrainedConfig):
    """Configuration class for RoCBert models (``roc_bert``).

    Stores the standard BERT-style hyper-parameters plus RoCBert's
    pronunciation/shape embedding options.

    NOTE(review): reconstructed — the original declared every __init__
    parameter with the same mangled name (a SyntaxError) and inherited from
    an undefined symbol; `PretrainedConfig` is imported at the top of this
    file and is the evident intended base.
    """

    # Model identifier used by the HF auto classes (upstream name: model_type).
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 275 | 0 |
def _a ( UpperCamelCase_ : int ) -> list:
"""simple docstring"""
lowerCAmelCase__ = int(UpperCamelCase_ )
if n_element < 1:
lowerCAmelCase__ = ValueError("a should be a positive number" )
raise my_error
lowerCAmelCase__ = [1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (0, 0, 0)
lowerCAmelCase__ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
a_ = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
a_ = hamming(int(n))
print('''-----------------------------------------------------''')
print(F"The list with nth numbers is: {hamming_numbers}")
print('''-----------------------------------------------------''')
| 115 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    """Test helper: builds tiny Nystromformer configs and random inputs, and
    runs shape-level checks for each task head.

    NOTE(review): reconstructed — the original declared every method's
    parameters with the same mangled name (a SyntaxError); the class name is
    restored to the one the test class below already instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice, as the multiple-choice head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for Nystromformer.

    NOTE(review): reconstructed — the original collapsed all method names
    onto one mangled identifier (so only the last method survived and none
    were discoverable as tests) and mangled the mixin attribute names the
    ModelTesterMixin machinery reads.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the public uw-madison/nystromformer-512
    checkpoint (network access required)."""

    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # Position 2 is the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 115 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_lowercase: Optional[int] = len(_UpperCamelCase )
_lowercase: Tuple = sum(_UpperCamelCase )
_lowercase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase: Optional[int] = True
for i in range(1 , s + 1 ):
_lowercase: List[str] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase: Dict = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase: int = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase: List[Any] = s - 2 * j
break
return diff
| 353 |
"""simple docstring"""
from collections.abc import Callable
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: float = a
_lowercase: float = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
_lowercase: float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
_lowercase: Union[str, Any] = mid
else:
_lowercase: Any = mid
_lowercase: List[Any] = start + (end - start) / 2.0
return mid
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 353 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( snake_case__ ):
A_ : Union[str, Any] = np.shape(snake_case__ )
if rows != columns:
A_ : List[Any] = (
"""'table' has to be of square shaped array but got a """
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(snake_case__ )
A_ : List[str] = np.zeros((rows, columns) )
A_ : Union[str, Any] = np.zeros((rows, columns) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
A_ : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(snake_case__ ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A_ : List[str] = (table[i][j] - total) / upper[j][j]
A_ : List[Any] = 1
for j in range(snake_case__ , snake_case__ ):
A_ : Union[str, Any] = sum(lower[i][k] * upper[k][j] for k in range(snake_case__ ) )
A_ : Optional[int] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
"""simple docstring"""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import ...`, a typo for beautifulsoup4
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Query Google for the command-line arguments and open the top results.
    # Fixes over the previous revision: the response/soup/link variables were
    # all collapsed into one obfuscated name, so `res`, `soup` and `links`
    # were undefined.
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
| 480 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): the class below looks this set up as `_TO_SKIP`, but it is bound
# here to `UpperCAmelCase__` — restore the original constant name.
UpperCAmelCase__ : Optional[int] = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class A ( unittest.TestCase ):
    """Pipeline tests for `TextClassificationPipeline` (PyTorch and TF backends).

    NOTE(review): this class is machine-mangled — every result variable was
    collapsed into `lowerCAmelCase__` (so later references to `text_classifier`,
    `outputs`, `model` and `N` are undefined), `__magic_name__` stands in for
    several unrecoverable values (booleans for `return_all_scores`, the
    `outputs` local, the expected exception type), all test methods share the
    name `__SCREAMING_SNAKE_CASE` (later defs shadow earlier ones), and two
    signatures repeat a parameter name, which is a SyntaxError. The upstream
    test file's names must be restored before this suite can run.
    """

    snake_case__ :Union[str, Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    snake_case__ :Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # NOTE(review): `model_mapping` / `tf_model_mapping` are the upstream names of
    # the two attributes above, and `_TO_SKIP` is the module constant bound as
    # `UpperCAmelCase__` — all three lookups below fail as written.
    if model_mapping is not None:
        snake_case__ :List[str] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        snake_case__ :Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check top_k / return_all_scores output shapes on a tiny PT model."""
        lowerCAmelCase__ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
        lowerCAmelCase__ = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        lowerCAmelCase__ = text_classifier("This is great !" , top_k=2 )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
        lowerCAmelCase__ = text_classifier(["This is great !", "This is bad"] , top_k=2 )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        lowerCAmelCase__ = text_classifier("This is great !" , top_k=1 )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        # Legacy behavior
        lowerCAmelCase__ = text_classifier("This is great !" , return_all_scores=__magic_name__ )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        lowerCAmelCase__ = text_classifier("This is great !" , return_all_scores=__magic_name__ )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
        lowerCAmelCase__ = text_classifier(["This is great !", "Something else"] , return_all_scores=__magic_name__ )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        lowerCAmelCase__ = text_classifier(["This is great !", "Something else"] , return_all_scores=__magic_name__ )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ] , )

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Same smoke test, pinning the pipeline to an explicit CPU device."""
        import torch

        lowerCAmelCase__ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
        lowerCAmelCase__ = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}] )

    @require_tf
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """TF-backend smoke test on the tiny model."""
        lowerCAmelCase__ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
        lowerCAmelCase__ = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "LABEL_0", "score": 0.504}] )

    @slow
    @require_torch
    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Slow test against the default PT sentiment model."""
        lowerCAmelCase__ = pipeline("text-classification" )
        lowerCAmelCase__ = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "POSITIVE", "score": 1.0}] )
        lowerCAmelCase__ = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
        lowerCAmelCase__ = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "POSITIVE", "score": 0.988}] )

    @slow
    @require_tf
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Slow test against the default TF sentiment model."""
        lowerCAmelCase__ = pipeline("text-classification" , framework="tf" )
        lowerCAmelCase__ = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "POSITIVE", "score": 1.0}] )
        lowerCAmelCase__ = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
        lowerCAmelCase__ = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": "POSITIVE", "score": 0.988}] )

    # NOTE(review): duplicate `__magic_name__` parameters — SyntaxError
    # (upstream signature: `model`, `tokenizer`, `processor`).
    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] ):
        """Build a pipeline plus sample inputs for the common pipeline tests."""
        lowerCAmelCase__ = TextClassificationPipeline(model=__magic_name__ , tokenizer=__magic_name__ )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    # NOTE(review): duplicate `__magic_name__` parameters — SyntaxError.
    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] ):
        """Run the model-agnostic checks shared by all text-classification models."""
        lowerCAmelCase__ = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        lowerCAmelCase__ = "HuggingFace is in"
        lowerCAmelCase__ = text_classifier(__magic_name__ )
        self.assertEqual(nested_simplify(__magic_name__ ) , [{"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}] )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        lowerCAmelCase__ = ["HuggingFace is in ", "Paris is in France"]
        lowerCAmelCase__ = text_classifier(__magic_name__ )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [{"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}, {"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        lowerCAmelCase__ = text_classifier(__magic_name__ , top_k=__magic_name__ )
        lowerCAmelCase__ = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [[{"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}] * N, [{"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}] * N] , )
        lowerCAmelCase__ = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        lowerCAmelCase__ = text_classifier(__magic_name__ )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , {"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )} , )
        self.assertTrue(outputs["label"] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        lowerCAmelCase__ = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(__magic_name__ ):
            text_classifier(__magic_name__ )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        lowerCAmelCase__ = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
        self.assertEqual(
            nested_simplify(__magic_name__ ) , [{"label": ANY(__magic_name__ ), "score": ANY(__magic_name__ )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 48 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# NOTE(review): all four module constants below are bound to the *same* name
# `__SCREAMING_SNAKE_CASE`, so each assignment shadows the previous one; the
# class below references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here.
__SCREAMING_SNAKE_CASE : int =logging.get_logger(__name__)

# Tokenizer file names expected inside a checkpoint directory.
__SCREAMING_SNAKE_CASE : List[str] ={
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

# Hub download URLs for the pretrained blenderbot_small-90M tokenizer files.
__SCREAMING_SNAKE_CASE : Optional[Any] ={
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

# Maximum sequence length supported by the positional embeddings.
__SCREAMING_SNAKE_CASE : str ={
    '''facebook/blenderbot_small-90M''': 512,
}
class A_(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer built on byte-level BPE.

    Fixes over the previous revision: the base class was the undefined name
    `__a` (upstream: `PreTrainedTokenizerFast`), all four class attributes were
    bound to the single name `_A`, both methods shared one name, and every
    signature repeated a parameter name, which is a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        # Wrap a byte-level BPE backend; the GPT-2-style special tokens above
        # are forwarded to the fast-tokenizer base class.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS around one sequence, or join a pair with EOS separators."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Blenderbot does not use token types: return an all-zero mask sized like the full input."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 428 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase(DPTImageProcessor):
    """Deprecated alias kept for backward compatibility with ``DPTFeatureExtractor``.

    Fixes over the previous revision: ``*args`` and ``**kwargs`` shared one
    parameter name (a SyntaxError), the base class was the undefined
    ``_UpperCamelCase`` (upstream: ``DPTImageProcessor``), and the warning
    category had been replaced by an undefined parameter name.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Emit the standard deprecation warning, then defer entirely to the
        # image-processor implementation.
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 421 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every constant below is bound to the same name `_snake_case`,
# so each assignment shadows the previous one; the tokenizer class references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here.
_snake_case : Optional[int] = logging.get_logger(__name__)

# SentencePiece's visible word-boundary marker.
_snake_case : List[Any] = '▁'

# Tokenizer file name expected inside a checkpoint directory.
_snake_case : Tuple = {'vocab_file': 'spiece.model'}

# Hub download URL for the pretrained SentencePiece model.
_snake_case : Optional[int] = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

# Maximum sequence length supported by the positional embeddings.
_snake_case : Union[str, Any] = {
    'google/reformer-crime-and-punishment': 524288,
}
class _UpperCAmelCase(PreTrainedTokenizer):
    """SentencePiece tokenizer for Reformer (crime-and-punishment checkpoint).

    Fixes over the previous revision: the base class was the undefined name
    `_UpperCamelCase` (upstream: `PreTrainedTokenizer`), all four class
    attributes shared the name `a_`, every method was named `lowercase` (so
    later definitions shadowed earlier ones), and several signatures repeated
    one parameter name, which is a SyntaxError. Attribute and method names are
    restored to the hooks `PreTrainedTokenizer` dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at *vocab_file*.

        :param sp_model_kwargs: extra kwargs for `SentencePieceProcessor`.
        """
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            # was a mutable `[]` default — normalize None to a fresh list instead
            additional_special_tokens=additional_special_tokens if additional_special_tokens is not None else [],
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        # Size of the underlying SentencePiece model (added tokens excluded).
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Return token -> id for the SP vocab plus any added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it here and
        # re-load it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles created before sp_model_kwargs existed
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
        # out-of-range ids fall through to None, matching the previous behavior

    def convert_tokens_to_string(self, tokens):
        """Re-join pieces, decoding around special tokens SentencePiece doesn't know."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # original file is gone: serialize the in-memory model instead
            with open(out_vocab_file, 'wb') as fi:
                fi.write(self.sp_model.serialized_model_proto())
        return (out_vocab_file,)
| 421 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict on disk.

    Fixes over the previous revision: the signature repeated one parameter
    name three times, which is a SyntaxError.

    :param tf_checkpoint_path: path to the TF checkpoint to read.
    :param rembert_config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point for the converter above.
    # Fixes over the previous revision: the parser/args results were bound to
    # the throwaway name `A`, so `parser.add_argument`, `args.*` and the
    # converter call all referenced undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # The converter carries the obfuscated name `_lowerCamelCase` in this module.
    _lowerCamelCase(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 636 |
"""simple docstring"""
from __future__ import annotations
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a=None ):
__lowerCAmelCase = data
__lowerCAmelCase = None
def __repr__( self ):
__lowerCAmelCase = []
__lowerCAmelCase = self
while temp:
string_rep.append(f"{temp.data}" )
__lowerCAmelCase = temp.next
return "->".join(__a )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not elements_list:
raise Exception("The Elements List is empty" )
__lowerCAmelCase = __lowerCAmelCase = Node(elements_list[0] )
for i in range(1 , len(_UpperCamelCase ) ):
__lowerCAmelCase = Node(elements_list[i] )
__lowerCAmelCase = current.next
return head
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if head_node is not None and isinstance(_UpperCamelCase , _UpperCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def _lowerCamelCase ( ):
    """Demo driver: build the sample list, print it, then print it reversed.

    NOTE(review): `make_linked_list` and `print_reverse` are undefined here —
    both helpers above carry the obfuscated name `_lowerCAmelCase` — and
    `_UpperCamelCase` below names the Node *class*, not the list just built;
    the `__main__` guard also calls an undefined `main()`. Restore the
    original names before running.
    """
    from doctest import testmod

    testmod()
    __lowerCAmelCase = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(_UpperCamelCase )
    print("Elements in Reverse:" )
    print_reverse(_UpperCamelCase )


if __name__ == "__main__":
    main()
| 636 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): all four constants below are bound to the same name
# `UpperCamelCase`, so each assignment shadows the previous one; the tokenizer
# class references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here.
UpperCamelCase : Tuple = logging.get_logger(__name__)

# Tokenizer file name expected inside a checkpoint directory.
UpperCamelCase : Optional[int] = {"vocab_file": "spiece.model"}

# Hub download URLs for the pretrained GPT-SW3 SentencePiece models.
UpperCamelCase : Tuple = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum sequence length supported by each model's positional embeddings.
UpperCamelCase : str = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowerCamelCase__ ( UpperCAmelCase_ ):
    """SentencePiece tokenizer for the GPT-SW3 model family.

    NOTE(review): this class is machine-mangled — `__init__` and several other
    signatures repeat the parameter name `_lowercase` (a SyntaxError), the base
    name `UpperCAmelCase_` is undefined in this module (upstream:
    `PreTrainedTokenizer`), the four class attributes all share the name
    `lowerCAmelCase`, and most locals were collapsed into `A`, leaving later
    references (`name_or_path`, `state`, `text`, `out_string`, `token_ids`, …)
    undefined. Restore the upstream names before this can be imported.
    """

    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Optional[int]=False , _lowercase : Union[str, Any]=False , _lowercase : Union[str, Any]=False , _lowercase : Dict=None , _lowercase : Optional[Any]=None , _lowercase : List[Any]=None , _lowercase : Any=None , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : Any , ):
        """Load the SentencePiece model and pick special-token defaults per checkpoint."""
        A = {} if sp_model_kwargs is None else sp_model_kwargs
        A = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            A = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        A = '<|endoftext|>' if eos_token is None else eos_token
        A = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            A = unk_token if pad_token is None else pad_token
            A = eos_token if bos_token is None else bos_token
        else:
            A = '<pad>' if pad_token is None else pad_token
            A = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
        A = do_lower_case
        A = remove_space
        A = keep_accents
        A = vocab_file
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_lowercase )
        # Used for whitespace normalization in input texts
        # fmt : off
        A = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        A = re.compile(
            f'[{"".join(map(_lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )

    def __getstate__( self : Any ):
        # Drop the unpicklable SentencePieceProcessor before pickling.
        A = self.__dict__.copy()
        A = None
        return state

    def __setstate__( self : Tuple , _lowercase : Tuple ):
        A = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            A = {}
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def __a ( self : Union[str, Any] ):
        return len(self.sp_model )

    def __a ( self : Any , _lowercase : str ):
        """Strip non-printing characters, normalize whitespace, then NFC-normalize."""
        A = self.non_printing_characters_re.sub('' , _lowercase )
        # Normalize whitespaces
        A = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        A = unicodedata.normalize('NFC' , _lowercase )
        return text

    def __a ( self : Dict , _lowercase : str , **_lowercase : Union[str, Any] ):
        A = self.preprocess_text(_lowercase )
        return self.sp_model.encode(_lowercase , out_type=_lowercase )

    def __a ( self : str , _lowercase : str ):
        return self.sp_model.PieceToId(_lowercase )

    def __a ( self : int , _lowercase : int ):
        return self.sp_model.IdToPiece(_lowercase )

    @staticmethod
    def __a ( _lowercase : str ):
        return out_string

    def __a ( self : Optional[Any] , _lowercase : List[str] ):
        """Re-join pieces, decoding around special tokens SentencePiece doesn't know."""
        A = []
        A = ''
        A = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_lowercase ) + token
                A = True
                A = []
            else:
                current_sub_tokens.append(_lowercase )
                A = False
        out_string += self.sp_model.decode(_lowercase )
        return out_string

    def __a ( self : List[Any] ):
        A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model into the save directory."""
        if not os.path.isdir(_lowercase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        A = os.path.join(
            _lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _lowercase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowercase , 'wb' ) as fi:
                A = self.sp_model.serialized_model_proto()
                fi.write(_lowercase )
        return (out_vocab_file,)

    def __a ( self : List[Any] , _lowercase : Union[str, List[str]] , _lowercase : Union[str, bool] = False ):
        """Fast encode path; optionally return a torch tensor of ids."""
        if isinstance(_lowercase , _lowercase ):
            A = self.preprocess_text(_lowercase )
            A = self.sp_model.encode(_lowercase )
        else:
            A = [self.preprocess_text(_lowercase ) for t in text]
            A = self.sp_model.encode(_lowercase )
        if return_tensors is True or return_tensors == "pt":
            A = torch.tensor(_lowercase )
        return token_ids

    def __a ( self : str , _lowercase : Union[int, List[int]] ):
        return self.sp_model.decode(_lowercase )

    def __a ( self : str , _lowercase : "Conversation" ):
        """Render a Conversation as alternating 'User:'/'Bot:' turns and encode it."""
        A = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        A = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(_lowercase ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=_lowercase )
| 717 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_ddp.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf_dist.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
        },
    ] )
class lowerCamelCase__ ( unittest.TestCase ):
    """Multi-node training integration tests executed on SageMaker.

    NOTE(review): every method below shares the obfuscated name `__a`, so only
    the last definition survives on the class, and several locals were
    collapsed into `A` (later references to `estimator`, `result_metrics_df`,
    `train_runtime`, `job_name` are undefined); `check=_lowercase` in the first
    method is likewise undefined (presumably `check=True` upstream — confirm).
    Restore the original method names before running.
    """

    def __a ( self : int ):
        """Copy the example training script into the test dir (PyTorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=_lowercase , )
        assert hasattr(self , 'env' )

    def __a ( self : Optional[int] , _lowercase : int ):
        """Build a HuggingFace estimator configured for the given node count."""
        A = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        A = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_lowercase , instance_count=_lowercase , instance_type=self.instance_type , debugger_hook_config=_lowercase , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_lowercase , py_version='py36' , )

    def __a ( self : Tuple , _lowercase : List[str] ):
        """Export the training job's tracked metrics to a CSV next to the test path."""
        TrainingJobAnalytics(_lowercase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(2,)] )
    def __a ( self : List[Any] , _lowercase : Union[str, Any] ):
        """Run a 2-node training job, assert KPI thresholds, dump results to JSON."""
        # create estimator
        A = self.create_estimator(_lowercase )
        # run training
        estimator.fit()
        # result dataframe
        A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        A = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _lowercase )
| 91 | 0 |
def UpperCAmelCase(a_) -> int:
    """Return the a_-th Fibonacci number (F(0)=0, F(1)=1, F(2)=1, ...).

    Preserves the previous contract's quirks: returns 0 when ``a_ == 1`` or
    when ``a_`` is not an int.

    Fixes over the previous revision: ``isinstance(a_, a_)`` raised TypeError
    for any numeric input, and the ``sequence`` list was never bound.
    """
    if a_ == 1 or not isinstance(a_, int):
        return 0
    elif a_ == 2:
        return 1
    else:
        sequence = [0, 1]
        # Build up F(2)..F(a_) iteratively; sequence[k] == F(k).
        for i in range(2, a_ + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[a_]
def UpperCAmelCase(a_) -> int:
    """Return the index of the first Fibonacci number with at least *a_* digits.

    Fixes over the previous revision: the body called a nonexistent
    ``fibonacci`` helper and the ``digits``/``index`` counters were never
    bound. Made self-contained by maintaining a running Fibonacci pair, which
    also turns the quadratic recompute-per-step into a linear scan.
    Matches the original loop exactly: index 2 is never inspected, so the
    smallest possible return value is 3.
    """
    index = 2
    prev, curr = 1, 1  # F(index - 1), F(index)
    digits = 0
    while digits < a_:
        index += 1
        prev, curr = curr, prev + curr  # advance to F(index)
        digits = len(str(curr))
    return index
def UpperCAmelCase(a_ = 1_0_0_0) -> int:
    """Project Euler 25: index of the first Fibonacci number with *a_* digits.

    Fixes over the previous revision: the body delegated to a nonexistent
    ``fibonacci_digits_index`` name; the scan is now inlined. Mirrors that
    helper's loop exactly (index 2 is never inspected).
    """
    index = 2
    prev, curr = 1, 1  # F(index - 1), F(index)
    digits = 0
    while digits < a_:
        index += 1
        prev, curr = curr, prev + curr  # advance to F(index)
        digits = len(str(curr))
    return index
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — all three functions in this
    # module carry the obfuscated name `UpperCAmelCase` — so this entry point
    # raises NameError as written; restore the original public names.
    print(solution(int(str(input()).strip())))
| 55 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds tiny DeiT configs/inputs and runs shape checks for the TF DeiT models.

    The obfuscated original defined every method under one shared name (so only the
    last survived) and used duplicate parameter names (a SyntaxError); the method
    names below are restored to match the calls made by ``TFDeiTModelTest``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for API parity; not stored by the original either
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny DeiTConfig from the tester's hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # the original passed an undefined placeholder here
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check last_hidden_state shape of the bare TFDeiTModel."""
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shape for RGB and greyscale inputs."""
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check logits shape for RGB and greyscale classification."""
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common tests for the TF DeiT models. Attention/hidden-state checks come from
    TFModelTesterMixin; here we only test the DeiT-specific bits.

    The obfuscated original assigned all class attributes to a single name and all
    test methods to a single name, so only the last of each survived; the canonical
    names are restored (``all_model_classes`` is grounded by its own use below).
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    # NOTE(review): the four False flags below were all assigned to one obfuscated
    # name; these attribute names follow the TFModelTesterMixin convention — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # the teacher model has no `labels` argument; drop them so the call succeeds
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below.

    Restored name: the test at the call site invokes ``prepare_img()``, but the
    obfuscated original was defined under a name shadowed by the test classes.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released distilled DeiT checkpoint.

    The obfuscated original gave the property and the test the same name, so the
    property was shadowed and ``self.default_image_processor`` was undefined.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 55 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_(metaclass=DummyObject):
    """Placeholder raising an ImportError unless both torch and scipy are installed.

    Fixes: the metaclass referenced an undefined name instead of the imported
    ``DummyObject``; the backends list must live in ``_backends`` (read by
    ``DummyObject``); and the two classmethods were both defined under one name,
    so the first was shadowed — the canonical dummy-object pattern provides
    distinct ``from_config`` and ``from_pretrained`` stubs.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 455 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make diffusers ops deterministic so the pixel-value assertions below are reproducible.
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for ``AltDiffusionPipeline`` built from tiny random components.

    Restorations: the five class attributes were all assigned to one obfuscated
    name (only the last survived); the mixin bases and several keyword values
    (``clip_sample``/``set_alpha_to_one``/``disable``/``skip_prk_steps``) referenced
    undefined placeholder names; method names are grounded by their call sites.
    """

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build the smallest working set of pipeline components with fixed seeds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")

        # (the original also bound an unused local constant 77 here; dropped)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released BAAI/AltDiffusion checkpoint.

    Restorations: the class name previously shadowed the fast-test class; the
    undefined ``lowercase`` placeholder is replaced with ``None`` for
    ``safety_checker``/``disable`` and with the imported ``torch_device`` for
    ``.to(...)``.
    """

    def tearDown(self):
        # free VRAM between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 455 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 97 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make diffusers ops deterministic so the pixel-value assertions below are reproducible.
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in ``tensor_list`` has the same ``.shape``.

    Vacuously True for an empty or single-element list. Restored name: the
    Karras-scheduler test below calls ``check_same_shape(...)``, but the
    obfuscated original was defined under an unrelated name.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for ``StableDiffusionLatentUpscalePipeline`` with tiny components.

    Restorations: class attributes were all assigned to one obfuscated name; the
    mixin bases and several keyword values (``norm_num_groups``, ``mid_block_type``,
    ``only_cross_attention``, ``disable``, ``skip_prk_steps``, the ``.to(...)``
    target and the ``getattr`` module) referenced undefined placeholders.
    """

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # NOTE(review): attribute name restored by convention — confirm against PipelineTesterMixin.
    test_cpu_offload = True

    @property
    def dummy_image(self):
        """A fixed 1x4x16x16 latent-space input (seeded, moved to the test device)."""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released sd-x2-latent-upscaler checkpoint.

    Restorations: the class name previously shadowed the fast-test class; the
    obfuscated dtype ``torch.floataa`` is restored to ``torch.float16``.
    """

    def tearDown(self):
        # free VRAM between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 632 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _lowerCamelCase ( lowerCamelCase__ : List[str] ):
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def is_chinese(word):
    """Return 1 if every character of ``word`` is a CJK ideograph, else 0.

    Restored: the parameter must be ``word`` (the body iterates it) and the name
    must match the ``is_chinese`` call sites in ``get_chinese_word``/``add_sub_symbol``.
    """
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the multi-character, all-CJK tokens from ``tokens`` (deduplicated).

    Restored: parameter renamed to the ``tokens`` the body reads, and the function
    renamed to match its call site in ``prepare_ref``.
    """
    word_set = set()

    for token in tokens:
        # only whole words (length > 1) made entirely of CJK chars qualify
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix ``##`` onto BERT tokens that are the non-initial pieces of a whole
    Chinese word in ``chinese_word_set`` (whole-word-masking references).

    Mutates and returns ``bert_tokens``. Restored: the obfuscation collapsed the
    ``start, end`` tuple unpack into a single assignment, which made the loop
    condition a NameError.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest possible match first, down to 2 characters
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """For each line, return the positions of BERT sub-tokens that continue a whole
    Chinese word (the '##' pieces), for Chinese whole-word masking.

    Restored: function/parameter names to match the call in ``main``, and the
    undefined placeholder kwarg values (``add_special_tokens``/``truncation``
    restored to True — confirm against the original script).
    """
    ltp_res = []

    # batch LTP word segmentation 100 lines at a time
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read input lines, compute whole-word-masking reference ids, and write one
    JSON list per line to ``args.save_path``.

    Restored: the parameter must be named ``args`` — the body reads ``args.*``.
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # Restored: the obfuscation renamed the assignment targets but not their uses,
    # so `parser` and `args` were undefined at the lines that read them.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# NOTE(review): restored the conventional target name; `__snake_case` was also
# reused for the constants below, so this assignment was being clobbered.
logger = logging.get_logger(__name__)
# Restored: all six module constants were assigned to the single name
# `__snake_case`, so each clobbered the previous one and the class below
# referenced undefined names. The targets here match the exact identifiers the
# tokenizer class reads (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_INIT_CONFIGURATION, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Union[str, Any] = VOCAB_FILES_NAMES
_a : Any = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_INIT_CONFIGURATION
_a : List[Any] = FunnelTokenizer
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = 2
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="<unk>" , lowerCamelCase__="<sep>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<cls>" , lowerCamelCase__="<mask>" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__="##" , **lowerCamelCase__ , ) -> Union[str, Any]:
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , clean_text=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , wordpieces_prefix=lowerCamelCase__ , **lowerCamelCase__ , )
lowercase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase__ ) != tokenize_chinese_chars
):
lowercase__ : List[str] = getattr(lowerCamelCase__ , normalizer_state.pop("""type""" ) )
lowercase__ : Optional[Any] = do_lower_case
lowercase__ : Union[str, Any] = strip_accents
lowercase__ : Optional[Any] = tokenize_chinese_chars
lowercase__ : Union[str, Any] = normalizer_class(**lowerCamelCase__ )
lowercase__ : Union[str, Any] = do_lower_case
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Tuple:
lowercase__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
lowercase__ : Optional[Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ ) | 128 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _check_torch_version():
    """Raise ImportError if torch is installed but older than 1.11.

    The patch extraction below relies on `torch.nn.functional.unfold` behavior
    that requires torch >= 1.11 (flag imported from `transformers.pytorch_utils`).
    Named `_check_torch_version` because that is the name the call sites in this
    module use.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from an image tensor.

    Args:
        image_tensor: `torch.Tensor` of shape (channels, height, width).
        patch_height: height of each patch; must divide the image height evenly.
        patch_width: width of each patch; must divide the image width evenly.

    Returns:
        `torch.Tensor` of shape (1, rows, columns, channels * patch_height * patch_width).

    Named `torch_extract_patches` (and with distinct parameter names — the previous
    signature repeated `A_`, a SyntaxError) because callers in this module use that name.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    # unfold produces (1, C*ph*pw, rows*columns); reshape/permute back to a patch grid.
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render `text` onto a new PIL image.

    Args:
        text: the text to render (wrapped at 80 characters per line).
        text_size: font size in points.
        text_color / background_color: colors understood by PIL.
        left/right/top/bottom_padding: pixels of padding around the text.
        font_bytes: raw TTF bytes to use as the font (takes precedence over the default).
        font_path: path to a TTF file to use as the font.

    Returns:
        A `PIL.Image.Image` containing the rendered text.

    Renamed from the obfuscated `snake_case_` (whose signature repeated the
    parameter name `A_`, a SyntaxError) to the name used by `render_header`.
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # Default font repository on the Hub (mirrors the module-level constant).
        font = hf_hub_download("ybelkada/fonts", "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    """Render a text `header` above `image` and return the combined image.

    Args:
        image: the image to add a header to, as a numpy array.
        header: the header text; extra kwargs (e.g. `font_bytes`, `font_path`)
            are forwarded to `render_text`.

    Returns:
        The combined image as a numpy array.

    Renamed to `render_header` because `preprocess` below calls it by that name.
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    # Scale both images to the common width, preserving aspect ratio.
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class __snake_case(BaseImageProcessor):
    r"""
    Image processor for Pix2Struct-style models.

    Optionally renders a text header onto each image (VQA mode), optionally
    normalizes per-image, then extracts flattened patches prefixed with 1-based
    row/column ids and zero-padded to `max_patches`.

    The base class was the undefined name `_lowercase`; `BaseImageProcessor`
    (imported above) is the class this processor's API matches. Methods were all
    named `SCREAMING_SNAKE_CASE` (shadowing each other) and are restored to the
    names the code itself calls (`self.extract_flattened_patches`, `self.normalize`).
    """

    # Name of the tensor this processor produces.
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize `image` to a patch-aligned size, then extract up to `max_patches`
        flattened patches, each prefixed by its (1-based) row and column id.

        Returns a numpy array of shape (max_patches, 2 + C * patch_h * patch_w),
        zero-padded when fewer than `max_patches` patches fit.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * cols <= max_patches while preserving aspect ratio
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        # NOTE: `torch.floataa` does not exist; float32 is the intended dtype.
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize `image` by its own mean and (clamped) standard deviation.

        The std is clamped below by 1/sqrt(num_elements) so constant images do
        not divide by ~0.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        # Calls the module-level `normalize` helper imported from image_transforms.
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one image or a batch of images into flattened patches.

        Returns a `BatchFeature` with `flattened_patches` and a matching
        `attention_mask` (1 where a patch row is non-zero, 0 on padding).

        Raises:
            ValueError: if `data_format` is passed via kwargs, on invalid image
                types, or when `header_text` is missing in VQA mode.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 83 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps a model shorthand to its (config class, masked/causal LM class, tokenizer class).
# Named MODEL_CLASSES because main() looks it up under that name.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Validate the parsed CLI arguments before training starts.

    Checks that the loss weights are mutually consistent with the chosen LM
    objective (MLM vs CLM), that student/teacher model families are compatible,
    and that referenced files exist. Raises AssertionError on any violation.
    Renamed to `sanity_checks` because main() calls it under that name.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the student's positional-embedding weights during distillation.

    The obfuscated version assigned `False` to a throwaway local instead of the
    embedding's `requires_grad` flag, making the function a no-op; the targets
    are restored per student architecture. Renamed to `freeze_pos_embeddings`
    because main() calls it under that name.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type-embedding weights (RoBERTa students only).

    The obfuscated version assigned `False` to a throwaway local instead of the
    embedding's `requires_grad` flag; the target is restored. Renamed to
    `freeze_token_type_embeddings` because main() calls it under that name.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Entry point for knowledge-distillation training.

    Parses CLI arguments, prepares the dump directory, loads tokenizer/teacher/
    student and the binarized dataset, then runs the Distiller. The obfuscated
    version used the undefined placeholder `_a` for every argparse `type=` /
    `required=` value and clobbered every local into one name; real values and
    names are restored. Renamed to `main` because the __main__ guard calls it.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    # The third member (student tokenizer) is unused; the teacher's tokenizer is shared.
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rarer tokens get a higher masking probability (XLM-style smoothing).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
# Script entry point: parse CLI args and launch distillation training.
if __name__ == "__main__":
    main()
| 257 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __A( unittest.TestCase ):
def _UpperCamelCase ( self, A, A ):
"""simple docstring"""
_UpperCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 20
_UpperCamelCase = self._get_uniform_logits(batch_size=2, length=A )
# tweak scores to not be uniform anymore
_UpperCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase = jax.nn.softmax(A, axis=-1 )
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase = jax.nn.softmax(temp_dist_warper_sharper(A, scores.copy(), cur_len=A ), axis=-1 )
_UpperCamelCase = jax.nn.softmax(temp_dist_warper_smoother(A, scores.copy(), cur_len=A ), axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 10
_UpperCamelCase = 2
# create ramp distribution
_UpperCamelCase = np.broadcast_to(np.arange(A )[None, :], (batch_size, vocab_size) ).copy()
_UpperCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase = FlaxTopKLogitsWarper(3 )
_UpperCamelCase = top_k_warp(A, A, cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist(), 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist(), 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase = 5
_UpperCamelCase = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3 )
_UpperCamelCase = np.broadcast_to(np.arange(A )[None, :], (batch_size, length) ).copy()
_UpperCamelCase = top_k_warp_safety_check(A, A, cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(), [2, 2] )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = 10
_UpperCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase = np.exp(top_p_warp(A, A, cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A, A, atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase = np.broadcast_to(np.arange(A )[None, :], (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0 )
_UpperCamelCase = top_p_warp(A, A, cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(), [3, 2] )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=A )
# check that min length is applied at length 5
_UpperCamelCase = ids_tensor((batch_size, 20), vocab_size=20 )
_UpperCamelCase = 5
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = min_dist_processor(A, A, cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = 15
_UpperCamelCase = min_dist_processor(A, A, cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase = ids_tensor((batch_size, 1), vocab_size=20 )
_UpperCamelCase = 1
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = logits_processor(A, A, cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase = 3
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = logits_processor(A, A, cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = 4
_UpperCamelCase = 0
_UpperCamelCase = 5
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=A, eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase = ids_tensor((batch_size, 4), vocab_size=20 )
_UpperCamelCase = 4
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = logits_processor(A, A, cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase = 3
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = logits_processor(A, A, cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = 4
_UpperCamelCase = 10
_UpperCamelCase = 15
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 15
# dummy input_ids and scores
_UpperCamelCase = ids_tensor((batch_size, sequence_length), A )
_UpperCamelCase = input_ids.copy()
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = scores.copy()
# instantiate all dist processors
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase = FlaxTopKLogitsWarper(3 )
_UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=A )
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=A, eos_token_id=A )
_UpperCamelCase = 10
# no processor list
_UpperCamelCase = temp_dist_warp(A, A, cur_len=A )
_UpperCamelCase = top_k_warp(A, A, cur_len=A )
_UpperCamelCase = top_p_warp(A, A, cur_len=A )
_UpperCamelCase = min_dist_proc(A, A, cur_len=A )
_UpperCamelCase = bos_dist_proc(A, A, cur_len=A )
_UpperCamelCase = eos_dist_proc(A, A, cur_len=A )
# with processor list
_UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase = processor(A, A, cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A, A, atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = 4
_UpperCamelCase = 10
_UpperCamelCase = 15
_UpperCamelCase = 2
_UpperCamelCase = 1
_UpperCamelCase = 15
# dummy input_ids and scores
_UpperCamelCase = ids_tensor((batch_size, sequence_length), A )
_UpperCamelCase = input_ids.copy()
_UpperCamelCase = self._get_uniform_logits(A, A )
_UpperCamelCase = scores.copy()
# instantiate all dist processors
_UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase = FlaxTopKLogitsWarper(3 )
_UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=A )
_UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
_UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=A, eos_token_id=A )
_UpperCamelCase = 10
# no processor list
def run_no_processor_list(A, A, A ):
_UpperCamelCase = temp_dist_warp(A, A, cur_len=A )
_UpperCamelCase = top_k_warp(A, A, cur_len=A )
_UpperCamelCase = top_p_warp(A, A, cur_len=A )
_UpperCamelCase = min_dist_proc(A, A, cur_len=A )
_UpperCamelCase = bos_dist_proc(A, A, cur_len=A )
_UpperCamelCase = eos_dist_proc(A, A, cur_len=A )
return scores
# with processor list
def run_processor_list(A, A, A ):
_UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase = processor(A, A, cur_len=A )
return scores
_UpperCamelCase = jax.jit(A )
_UpperCamelCase = jax.jit(A )
_UpperCamelCase = jitted_run_no_processor_list(A, A, A )
_UpperCamelCase = jitted_run_processor_list(A, A, A )
# scores should be equal
self.assertTrue(jnp.allclose(A, A, atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
| 105 |
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Return True if every character of the string is unique, else False.

    Uses an integer as a bitmap: bit ``ord(ch)`` is set the first time a
    character is seen; a second occurrence finds the bit already on.
    """
    input_str = lowerCAmelCase
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 105 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: maps submodule name -> public names it exports.
# Submodules are only materialized on first attribute access via _LazyModule.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): RoCBert ships no fast tokenizer; the availability probe is
    # kept for structural parity with other model __init__ modules.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch modeling classes are only exposed when torch is installed.
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 97 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    '''Map one original ViTMAE checkpoint parameter name to its HF Transformers equivalent.

    Replacements are ordered: e.g. "attn.proj" must be handled before the bare
    "attn" rule, and decoder-prefixed names before their encoder counterparts.
    '''
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
    if "mask_token" in name:
        name = name.replace('''mask_token''' , '''decoder.mask_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''vit.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )

    return name
def convert_state_dict(orig_state_dict, config):
    '''Rewrite an original ViTMAE state dict in place to HF naming.

    Fused ``qkv`` projections are split into separate query/key/value tensors
    (rows [0:dim], [dim:2*dim], [-dim:]); every other key is renamed via
    ``rename_key``.  Returns the mutated ``orig_state_dict``.
    '''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            # NOTE(review): relies on the sibling helper (original mangled name
            # ``a``) being restored as ``rename_key``.
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''Download an original ViTMAE checkpoint, convert it, verify logits, and save.

    The model size (base/large/huge) is inferred from the checkpoint URL.
    Raises AssertionError if the converted model's logits on the reference
    image do not match the recorded slice.
    '''
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='''pt''')

    # forward pass
    torch.manual_seed(2)  # masking is random; fix the seed so logits are reproducible
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 97 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position as (y, x).
TPosition = tuple[int, int]
class Node:
    """A search node for A*: position, goal, path cost g, heuristic h and total f = g + h."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sorting open lists by f-cost relies on this.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``delta`` moves."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Node takes (pos_x, pos_y, ...) while positions are (y, x) tuples.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 9_99_99, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand nodes by lowest f-cost until the target is reached; return the path."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # No path found: fall back to the start position only.
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the (y, x) path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Runs two A* searches (start->goal and goal->start) that meet in the middle."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate forward/backward expansions until both frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        # No meeting point found: fall back to the forward start position only.
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (dropping the duplicate meet node)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    # NOTE(review): the original timing block only constructs the bidirectional
    # searcher; ``bd_a_star.search()`` is never invoked here — confirm intent.
    bd_end_time = time.time() - bd_start_time
    print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 40 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Per RWKV model size: number of transformer blocks.
NUM_HIDDEN_LAYERS_MAPPING = {
    '169M': 12,
    '430M': 24,
    '1B5': 24,
    '3B': 32,
    '7B': 32,
    '14B': 40,
}

# Per RWKV model size: hidden dimension.  (Spelling "HIDEN" kept — this is the
# name the converter below looks up.)
HIDEN_SIZE_MAPPING = {
    '169M': 768,
    '430M': 1_024,
    '1B5': 2_048,
    '3B': 2_560,
    '7B': 4_096,
    '14B': 5_120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the HF Transformers scheme and return the dict."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn", R"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        # Everything except the LM head lives under the "rwkv." submodule.
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Convert an original RWKV checkpoint from the Hub into HF format under ``output_dir``.

    Steps: build/save tokenizer, infer size and save config, download and
    rename the state dict, write sharded weights (+ index), re-save each shard
    with CPU tensors, and optionally push the result to the Hub.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
    )
    parser.add_argument(
        '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
    )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
    )
    parser.add_argument(
        '--tokenizer_file',
        default=None,
        type=str,
        help='Path to the tokenizer file to use (if not provided, only the model is converted).',
    )
    parser.add_argument(
        '--size',
        default=None,
        type=str,
        help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Push to the Hub the converted model.',
    )
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='Name of the pushed model on the Hub, including the username / organization.',
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 40 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch modeling classes are only exposed when torch is installed.
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
import math
class Graph:
    """Dense weighted digraph supporting all-pairs shortest paths via Floyd-Warshall."""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        # NOTE(review): dp[i][i] is never initialised to 0, so a node's
        # distance to itself stays inf unless a cycle exists — confirm intent.

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v of weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the computed minimum distance from u to v."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Build the example graph, run Floyd-Warshall and query two distances.
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 684 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture (with byte fallback) used by the tokenizer tests below.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3, driven by the shared TokenizerTesterMixin."""

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Token <-> id round trip for a known vocab entry."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_0_0_0)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_0_0_0)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences, )
| 712 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class A__ ( A ):
    """Read-only fsspec-style filesystem over the sibling files of one Hub repo.

    NOTE(review): this block looks machine-mangled — assignment targets
    (``_lowerCAmelCase``), the triplicated method name ``__magic_name__`` and the
    duplicated ``A_`` parameters do not match the names the bodies actually read
    (``repo_info``, ``token``, ``self.dir_cache``, ``path`` …). The code is kept
    byte-identical here; only comments/docstrings were added.
    """
    # fsspec root marker and protocol name for this (legacy) filesystem.
    _lowercase : List[str] = ''''''
    _lowercase : Optional[Any] = '''hf-legacy''' # "hf://"" is reserved for hffs
    def __init__( self : Dict , A_ : Optional[DatasetInfo] = None , A_ : Optional[str] = None , **A_ : int , ):
        '''Store repo metadata and auth token; the directory cache is built lazily.'''
        super().__init__(self , **A_ )
        # NOTE(review): these three assignments presumably store
        # self.repo_info / self.token / self.dir_cache — verify against upstream.
        _lowerCAmelCase : Union[str, Any] = repo_info
        _lowerCAmelCase : Optional[int] = token
        _lowerCAmelCase : str = None
    def __magic_name__ ( self : Tuple ):
        '''Populate the path -> metadata cache from repo siblings on first use.'''
        if self.dir_cache is None:
            _lowerCAmelCase : int = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                _lowerCAmelCase : Optional[int] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file as a directory entry.
                self.dir_cache.update(
                    {
                        str(A_ ): {"name": str(A_ ), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def __magic_name__ ( self : Dict , A_ : str , A_ : str = "rb" , **A_ : Optional[int] , ):
        '''Open a repo file by streaming it from its resolved Hub URL.'''
        if not isinstance(self.repo_info , A_ ):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        # Resolve the file to a fully-qualified hub URL pinned at the repo's sha.
        _lowerCAmelCase : str = hf_hub_url(self.repo_info.id , A_ , revision=self.repo_info.sha )
        return fsspec.open(
            A_ , mode=A_ , headers=get_authentication_headers_for_url(A_ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
    def __magic_name__ ( self : Union[str, Any] , A_ : List[Any] , **A_ : str ):
        '''Return cached metadata for a path, or raise FileNotFoundError.'''
        self._get_dirs()
        _lowerCAmelCase : Optional[int] = self._strip_protocol(A_ )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(A_ )
    def __magic_name__ ( self : Any , A_ : str , A_ : Any=False , **A_ : Dict ):
        '''List the direct children of a path; full dicts when ``detail`` is truthy.'''
        self._get_dirs()
        _lowerCAmelCase : Dict = PurePosixPath(path.strip("/" ) )
        _lowerCAmelCase : str = {}
        for p, f in self.dir_cache.items():
            _lowerCAmelCase : List[str] = PurePosixPath(p.strip("/" ) )
            _lowerCAmelCase : Tuple = p.parent
            # Keep only entries whose parent is exactly the requested path.
            if root == path:
                _lowerCAmelCase : Union[str, Any] = f
        _lowerCAmelCase : List[str] = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out )
| 503 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for ``AutoTokenizer``: checkpoint dispatch, ``tokenizer_type`` loading,
    custom registration, and ``trust_remote_code`` dynamic-tokenizer behaviour.

    NOTE(review): local names look machine-mangled — every assignment targets
    ``__SCREAMING_SNAKE_CASE`` while bodies read ``tokenizer``/``config``/… and
    many call sites pass ``_a``. The code is kept byte-identical; only
    comments/docstrings were added. Most tests hit the Hub and require network.
    """
    def __lowerCAmelCase ( self ) -> Any:
        # setUp-style hook; no shared state is actually needed by the tests.
        __SCREAMING_SNAKE_CASE = 0
    @slow
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # Every BERT checkpoint (except japanese variants) should load via AutoTokenizer.
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(_a ), 0 )
        # Same for the GPT-2 archive map.
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(_a ), 0 )
    def __lowerCAmelCase ( self ) -> str:
        # Tiny dummy BERT checkpoint: 12-token vocab.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a, (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size, 12 )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Tiny dummy RoBERTa checkpoint: 20-token vocab.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a, (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size, 20 )
    def __lowerCAmelCase ( self ) -> List[Any]:
        # Passing an explicit config must drive tokenizer selection.
        __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
        self.assertIsInstance(_a, _a )
        # Check that tokenizer_type ≠ model_type
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, config=_a )
        self.assertIsInstance(_a, (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size, 12 )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # ``tokenizer_type`` with use_fast toggled: load from bare vocab files.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(_a, "vocab.txt" ) )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, tokenizer_type="bert", use_fast=_a )
            self.assertIsInstance(_a, _a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(_a, "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(_a, "merges.txt" ) )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, tokenizer_type="gpt2", use_fast=_a )
            self.assertIsInstance(_a, _a )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> str:
        # Same as above but with the default (fast) tokenizer classes.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(_a, "vocab.txt" ) )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, tokenizer_type="bert" )
            self.assertIsInstance(_a, _a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(_a, "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(_a, "merges.txt" ) )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, tokenizer_type="gpt2" )
            self.assertIsInstance(_a, _a )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Unknown tokenizer_type values are rejected.
        with pytest.raises(_a ):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx" )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Tuple:
        # Hub tokenizer_config values (do_lower_case, model_max_length) are honoured.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            __SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
            self.assertIsInstance(_a, (BertTokenizer, BertTokenizerFast) )
            if isinstance(_a, _a ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, _a )
            else:
                self.assertEqual(tokenizer.do_lower_case, _a )
            self.assertEqual(tokenizer.model_max_length, 5_12 )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> str:
        # Nonexistent repos produce a helpful error message.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                _a, "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier", ):
                __SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Every class name registered in TOKENIZER_MAPPING must be resolvable by name.
        __SCREAMING_SNAKE_CASE = TOKENIZER_MAPPING.values()
        __SCREAMING_SNAKE_CASE = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(_a )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # use_fast toggles between the slow and fast implementations.
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=_a ), _a )
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ), _a )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # do_lower_case=False keeps cased text out-of-vocab for uncased models.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=_a )
        __SCREAMING_SNAKE_CASE = "Hello, world. How are you?"
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(_a )
        self.assertEqual("[UNK]", tokens[0] )
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=_a )
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(_a )
        self.assertEqual("[UNK]", tokens[0] )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # A tokenizer.json-only repo loads as PreTrainedTokenizerFast with its config applied.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
        self.assertEqual(type(_a ), _a )
        self.assertEqual(tokenizer.model_max_length, 5_12 )
        self.assertEqual(tokenizer.vocab_size, 3_00_00 )
        self.assertEqual(tokenizer.unk_token, "[UNK]" )
        self.assertEqual(tokenizer.padding_side, "right" )
        self.assertEqual(tokenizer.truncation_side, "right" )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # save_pretrained / from_pretrained round-trip preserves class and vocab.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a, (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
        self.assertIsInstance(_a, tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size, 12 )
    def __lowerCAmelCase ( self ) -> Tuple:
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ctrl" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(_a, _a )
    def __lowerCAmelCase ( self ) -> List[str]:
        # get_tokenizer_config behaviour for configured, unconfigured and saved repos.
        __SCREAMING_SNAKE_CASE = get_tokenizer_config("bert-base-cased" )
        __SCREAMING_SNAKE_CASE = config.pop("_commit_hash", _a )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(_a, {"do_lower_case": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        __SCREAMING_SNAKE_CASE = get_tokenizer_config(_a )
        self.assertDictEqual(_a, {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            __SCREAMING_SNAKE_CASE = get_tokenizer_config(_a )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer" )
    def __lowerCAmelCase ( self ) -> int:
        # Registering a custom (config, slow tokenizer) pair; cleanup in ``finally``
        # so a failure cannot poison the global mappings for other tests.
        try:
            AutoConfig.register("custom", _a )
            AutoTokenizer.register(_a, slow_tokenizer_class=_a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoTokenizer.register(_a, slow_tokenizer_class=_a )
            __SCREAMING_SNAKE_CASE = CustomTokenizer.from_pretrained(_a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )
                __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
                self.assertIsInstance(_a, _a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Registering slow + fast custom tokenizers, in two steps and in one.
        try:
            AutoConfig.register("custom", _a )
            # Can register in two steps
            AutoTokenizer.register(_a, slow_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None) )
            AutoTokenizer.register(_a, fast_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                _a, slow_tokenizer_class=_a, fast_tokenizer_class=_a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_a ):
                AutoTokenizer.register(_a, fast_tokenizer_class=_a )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                __SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained(_a )
                bert_tokenizer.save_pretrained(_a )
                __SCREAMING_SNAKE_CASE = CustomTokenizerFast.from_pretrained(_a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )
                __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a )
                self.assertIsInstance(_a, _a )
                __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, use_fast=_a )
                self.assertIsInstance(_a, _a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def __lowerCAmelCase ( self ) -> Any:
        # Dynamic (remote-code) tokenizers require an explicit trust_remote_code=True.
        with self.assertRaises(_a ):
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_a ):
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a )
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_a )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, trust_remote_code=_a )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast" )
            # Test we can also load the slow version
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a, use_fast=_a )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_a )
                __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, trust_remote_code=_a, use_fast=_a )
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer" )
    @require_tokenizers
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Local registered classes vs Hub remote code, depending on trust_remote_code.
        # NOTE(review): the two inner classes look mangled — upstream defines a
        # local NewTokenizer/NewTokenizerFast pair with special_attribute_present=False.
        class __SCREAMING_SNAKE_CASE ( __snake_case ):
            SCREAMING_SNAKE_CASE__ =False
        class __SCREAMING_SNAKE_CASE ( __snake_case ):
            SCREAMING_SNAKE_CASE__ =NewTokenizer
            SCREAMING_SNAKE_CASE__ =False
        try:
            AutoConfig.register("custom", _a )
            AutoTokenizer.register(_a, slow_tokenizer_class=_a )
            AutoTokenizer.register(_a, fast_tokenizer_class=_a )
            # If remote code is not set, the default is to use local
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a, use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" )
            self.assertTrue(tokenizer.special_attribute_present )
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=_a, use_fast=_a )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def __lowerCAmelCase ( self ) -> List[Any]:
        # Legacy dynamic-tokenizer repo layout still loads with trust_remote_code.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=_a )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast" )
            # Test we can also load the slow version
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=_a, use_fast=_a )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer" )
    def __lowerCAmelCase ( self ) -> Dict:
        # Invalid model identifier error message.
        with self.assertRaisesRegex(
            _a, "bert-base is not a local folder and is not a valid model identifier" ):
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base" )
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Invalid revision error message.
        with self.assertRaisesRegex(
            _a, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_a, revision="aaaaaa" )
    def __lowerCAmelCase ( self ) -> List[str]:
        # A second load should be served from cache: one HEAD request, no GETs.
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        with RequestCounter() as counter:
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        self.assertEqual(counter.get_request_count, 0 )
        self.assertEqual(counter.head_request_count, 1 )
        self.assertEqual(counter.other_request_count, 0 )
| 693 |
import math
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowercase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=1 , **SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = factor * value
UpperCamelCase = value
while not is_prime(SCREAMING_SNAKE_CASE_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
return value
| 386 | 0 |
"""Lazy import shim for the BARTpho tokenizer (sentencepiece-optional)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Module name -> list of public names exported from it; filled conditionally.
# (The original bound this dict to a throwaway name, so the ``_import_structure``
# reference below raised NameError at import time.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional: without it the tokenizer is simply not exported.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports only happen
    # on first attribute access (the original dropped this assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Lazy import shim for AltCLIP (torch-optional modeling exports)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Module name -> list of public names exported from it.
# (The original bound this dict to a throwaway name, so the ``_import_structure``
# reference below raised NameError at import time.)
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: without it the modeling classes are not exported.
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports only happen
    # on first attribute access (the original dropped this assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Lazy import shim for the Wav2Vec2-with-LM processor."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Module name -> list of public names exported from it.
# (The original bound this dict to a throwaway name, so the
# ``_import_structure`` reference below raised NameError at import time.)
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    # Import path must match the key in ``_import_structure`` above; the
    # original referenced a mangled module name that does not exist.
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports only happen
    # on first attribute access (the original dropped this assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
import math
def UpperCAmelCase__(_SCREAMING_SNAKE_CASE: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers have the form k * 2**n + 1 with odd k < 2**n.

    Raises:
        TypeError: if the argument is not an int.
        ValueError: if the argument is < 1.
    """
    # The original body referenced ``number`` while the parameter had a
    # mangled name, and every local was collapsed onto one name; the classic
    # block-wise generation algorithm is restored here.
    number = _SCREAMING_SNAKE_CASE
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to reach index ``number``.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            # Each block adds 2**(block+1) to the previous entries; the block
            # length doubles every iteration.
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: the original loop called a nonexistent ``proth`` and rebound the
    # function name; it now calls the definition above and uses a local.
    for number in range(11):
        value = 0
        try:
            value = UpperCAmelCase__(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 186 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def _UpperCAmelCase(image, lang, tesseract_config):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Empty words are dropped together with their coordinates; each remaining
    box is converted to (left, top, right, bottom) and normalized to the
    0-1000 scale. The original signature repeated one mangled parameter name
    (a SyntaxError) and the body referenced undefined locals; names are
    unified here and the per-box normalization is inlined so this function
    does not depend on a (shadowed) sibling helper.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    # (a set makes the repeated membership tests O(1) instead of O(n)).
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left, top, width, height)]

    # finally, normalize the bounding boxes to the 0-1000 scale
    normalized_boxes = [
        [
            int(1_0_0_0 * (box[0] / image_width)),
            int(1_0_0_0 * (box[1] / image_height)),
            int(1_0_0_0 * (box[2] / image_width)),
            int(1_0_0_0 * (box[3] / image_height)),
        ]
        for box in actual_boxes
    ]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _a ( SCREAMING_SNAKE_CASE__ ):
    """LayoutLM-style image processor: resize / rescale / normalize images and
    optionally run Tesseract OCR to extract words with normalized boxes.

    NOTE(review): signatures look machine-mangled — every parameter is named
    ``lowercase_`` (duplicated) while the bodies read ``size``/``do_resize``/…
    and pass ``snake_case__`` to helpers. The code is kept byte-identical;
    only comments/docstrings were added.
    """
    # Model input name produced by ``preprocess``.
    _UpperCamelCase: Tuple = ["pixel_values"]
    def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = True , lowercase_ = None , lowercase_ = "" , **lowercase_ , ) -> List[Any]:
        '''Store default preprocessing settings (size, resample, rescale, normalization, OCR).'''
        super().__init__(**snake_case__ )
        lowerCAmelCase : List[Any] = size if size is not None else {"height": 224, "width": 224}
        lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case__ )
        lowerCAmelCase : Union[str, Any] = do_resize
        lowerCAmelCase : Union[str, Any] = size
        lowerCAmelCase : Tuple = resample
        lowerCAmelCase : Tuple = do_rescale
        lowerCAmelCase : Any = rescale_value
        lowerCAmelCase : Optional[Any] = do_normalize
        lowerCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
        lowerCAmelCase : int = apply_ocr
        lowerCAmelCase : Tuple = ocr_lang
        lowerCAmelCase : str = tesseract_config
    def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> List[Any]:
        '''Resize an image to the (height, width) given in ``size``.'''
        lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" )
        lowerCAmelCase : Dict = (size["height"], size["width"])
        return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Optional[Any]:
        '''Multiply pixel values by ``scale`` (e.g. 1/255).'''
        return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> int:
        '''Channel-wise normalize an image with ``mean`` and ``std``.'''
        return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> List[str]:
        '''Full preprocessing pipeline: validate args, optional OCR, then resize/rescale/normalize.'''
        # Fall back to the instance defaults for every unset argument.
        lowerCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase : Tuple = size if size is not None else self.size
        lowerCAmelCase : List[str] = get_size_dict(snake_case__ )
        lowerCAmelCase : int = resample if resample is not None else self.resample
        lowerCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
        lowerCAmelCase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
        lowerCAmelCase : str = ocr_lang if ocr_lang is not None else self.ocr_lang
        lowerCAmelCase : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
        lowerCAmelCase : Tuple = make_list_of_images(snake_case__ )
        if not valid_images(snake_case__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
        # All transformations expect numpy arrays.
        lowerCAmelCase : Any = [to_numpy_array(snake_case__ ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , """pytesseract""" )
            lowerCAmelCase : Union[str, Any] = []
            lowerCAmelCase : List[Any] = []
            for image in images:
                # NOTE(review): ``apply_tesseract`` is not defined at module level
                # in this file (the helper above is mangled) — verify upstream.
                lowerCAmelCase : Any = apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
                words_batch.append(snake_case__ )
                boxes_batch.append(snake_case__ )
        if do_resize:
            lowerCAmelCase : Dict = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
        if do_rescale:
            lowerCAmelCase : Union[str, Any] = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
        if do_normalize:
            lowerCAmelCase : List[Any] = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
        # Convert to the requested channel layout and wrap in a BatchFeature.
        lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
        lowerCAmelCase : Dict = BatchFeature(data={"""pixel_values""": images} , tensor_type=snake_case__ )
        if apply_ocr:
            lowerCAmelCase : List[str] = words_batch
            lowerCAmelCase : Optional[int] = boxes_batch
        return data
| 719 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """ModelOutput-style container for the text-encoder results.

    NOTE(review): the four field names below are machine-mangled into one
    repeated identifier, so only the last binding survives — upstream this is
    a four-field output (e.g. projection state, last hidden state, hidden
    states, attentions). Code kept byte-identical; only comments added.
    """
    _UpperCAmelCase = None
    _UpperCAmelCase = None
    _UpperCAmelCase = None
    _UpperCAmelCase = None
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Configuration for the RobertaSeries text encoder (projection head variant).

    NOTE(review): every parameter is mangled to the same name and the body
    reads ``project_dim``/``pooler_fn``/``learn_encoder``/``use_attention_mask``
    without storing them on ``self`` — upstream these are keyword parameters
    saved as config attributes. Code kept byte-identical; only comments added.
    """
    def __init__( self , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__="cls" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
        '''Forward the special-token ids to the base config and record projection settings.'''
        super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
        # NOTE(review): these presumably should be self.project_dim, self.pooler_fn,
        # self.learn_encoder and self.use_attention_mask — verify upstream.
        SCREAMING_SNAKE_CASE_ : Tuple = project_dim
        SCREAMING_SNAKE_CASE_ : List[str] = pooler_fn
        SCREAMING_SNAKE_CASE_ : List[Any] = learn_encoder
        SCREAMING_SNAKE_CASE_ : Optional[Any] = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """XLM-Roberta encoder followed by a linear projection of the hidden states."""

    _keys_to_ignore_on_load_unexpected = [r"""pooler""", r"""logit_scale"""]
    _keys_to_ignore_on_load_missing = [r"""position_ids""", r"""predictions.decoder.bias"""]
    base_model_prefix = """roberta"""
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        # Optional second projection applied to the next-to-last layer instead of the last one.
        self.has_pre_transformation = getattr(config, 'has_pre_transformation', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Encode the inputs and return a `TransformationModelOutput` with the
        projected text embeddings in `projection_state`."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # the pre-transformation branch needs the next-to-last hidden state
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 101 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( DiffusionPipeline ):
    '''Unconditional image-generation pipeline for the Karras et al. (2022) stochastic sampler.

    NOTE(review): the original base-class name was unresolved; `DiffusionPipeline`
    (imported above) is the base that provides `register_modules` / `progress_bar` /
    `numpy_to_pil` used below.
    '''

    # components registered in __init__
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        '''Generate `batch_size` images using `num_inference_steps` sampler steps.

        Returns an `ImagePipelineOutput` (or a plain tuple when `return_dict=False`).
        '''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output['derivative'],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 7 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module-level logger and vocabulary-file metadata read by the tokenizer class below.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''Return a mapping from every byte value (0-255) to a printable unicode char.

    Printable ASCII/Latin-1 bytes map to themselves; the remaining bytes are
    remapped to code points >= 256 so the BPE vocabulary never contains
    whitespace or control characters.
    '''
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in `word` (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE_(PreTrainedTokenizer):
    '''Byte-level BPE tokenizer for LED (same vocabulary/merges format as BART/GPT-2).

    NOTE(review): the original class body had duplicate parameter names and every
    method named identically (obfuscation damage); names were restored to the ones
    the `PreTrainedTokenizer` contract requires.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        '''Load the vocabulary/merges files and configure the special tokens.'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        '''Number of tokens in the base vocabulary (excluding added tokens).'''
        return len(self.encoder)

    def get_vocab(self):
        '''Return the full vocabulary (base + added tokens) as token -> id.'''
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        '''Apply byte-pair merges to a single pre-tokenized token (results are cached).'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (most frequent) remaining pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        '''Tokenize a string into BPE sub-tokens.'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        '''Convert a token (str) to an id using the vocab; unknown tokens map to unk.'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''Convert an index (int) to a token (str) using the vocab.'''
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        '''Convert a sequence of BPE tokens back into a single decoded string.'''
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''Write `vocab.json` and `merges.txt` into `save_directory`; return their paths.'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # ranks should be dense 0..n-1; a gap means a corrupted merges table
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        '''Add `<s> A </s>` (single sequence) or `<s> A </s></s> B </s>` (pair) specials.'''
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False):
        '''Return a 0/1 mask (1 = special token) for a sequence built by this tokenizer.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        '''LED does not use token type ids; return zeros of the built-sequence length.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''Optionally prepend a space so the first word is encoded like any other.'''
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        '''Pad as the base class does, additionally padding `global_attention_mask` with -1.'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 150 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets (up to the API's ~3200-tweet limit) and
    dump them to ``new_<screen_name>_tweets.csv``.

    Requires the module-level Twitter API credentials above to be filled in.
    """
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=2_00)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(F"""getting tweets before {oldest}""")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=2_00, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets)} tweets downloaded so far""")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("""FirePing32""")
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the assigned name is obfuscated and nothing below reads it;
# it was presumably meant to be `logger = logging.get_logger(__name__)` — confirm before renaming.
lowercase_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping timm ViT-hybrid weights to HF names.

    Args:
        config: a ViTHybridConfig; only `backbone_config.depths` and
            `num_hidden_layers` are read here.
        base_model: when True, target the bare `ViTHybridModel` (strip the
            "vit." prefix and map the pre-logits pooler instead of the head).
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
    rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
    rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
    rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
    # backbone
    rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
    rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'))
            rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'))
        # only the first block of each stage has a downsampling path
        rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'))
        rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'))
        rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'))
    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias'))
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])
    # fmt: on
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF q/k/v entries (in place).

    Args:
        state_dict: the checkpoint being converted; modified in place.
        config: needs `num_hidden_layers` and `hidden_size`.
        base_model: when True, omit the "vit." prefix in the target key names.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification-head weights from `state_dict` in place (no-op if absent)."""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place (raises KeyError if `old` is missing)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Fetch the standard COCO cats test image used to verify the conversion."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm ViT-hybrid checkpoint into the HF ViTHybrid format.

    Loads the timm model named `vit_name`, remaps its weights, verifies that
    preprocessing and logits match, then optionally saves/pushes the result.
    """
    # define default ViT hybrid configuration (BiT backbone, 384px, ImageNet-1k head)
    backbone_config = BitConfig(
        global_padding='''same''', layer_type='''bottleneck''', depths=(3, 4, 9), out_features=['''stage3'''], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring timm's preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='''pt''').pixel_values
    # verify pixel values produced by the HF processor match timm's transform
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('''Predicted class:''', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F'Saving model {vit_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F'Pushing model and processor to the hub {vit_name}')
        model.push_to_hub(F'ybelkada/{vit_name}')
        processor.push_to_hub(F'ybelkada/{vit_name}')
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 64 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds a tiny LayoutLMv3 config plus dummy inputs and runs shape checks.

    Renamed from ``a__``: three classes in this module shared that name and
    shadowed one another, and the test suite's ``setUp`` instantiates
    ``TFLayoutLMvaModelTester`` by this name.  The original ``__init__`` also
    repeated the parameter name ``lowercase`` for every argument (a
    SyntaxError) and collapsed every attribute assignment into a local
    ``A__``; the real parameter/attribute names are restored below.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Create config plus random ids/bbox/pixel inputs and optional labels."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: swap coordinates so x1 <= x2 and y1 <= y2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Forward the base model with text+image, text-only and image-only inputs."""
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the sequence-classification head produces (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        """Check the token-classification head produces (batch, text_seq, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the QA head produces start/end logits of shape (batch, seq_length)."""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF LayoutLMv3 model classes.

    Reconstructed from mangled code: the base classes were the undefined name
    ``snake_case`` (intended: the imported ``TFModelTesterMixin`` and
    ``PipelineTesterMixin``), every class attribute was assigned to the same
    name ``__lowerCamelCase``, every local to ``A__``, and ``tf.intaa`` is not
    a TensorFlow attribute (intended: ``tf.int32``).
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LayoutLMv3 pipelines need a processor, which these generic tests do not build.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        """Copy ``inputs_dict`` and add the label tensors ``model_class`` expects."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile every tensor along a new num_choices axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """Check hf_compute_loss works with kwargs, masked labels, dict and tuple inputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the slow integration test.

    Renamed from ``lowerCAmelCase__``: the integration test calls
    ``prepare_img()`` and the mangled name was never referenced.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released ``microsoft/layoutlmv3-base`` weights."""

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own input_ids/bbox.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 514 | 0 |
'''simple docstring'''
def lowercase__(__UpperCamelCase) -> str:
    """Convert a binary string to its octal representation, one digit per 3 bits.

    Leading zeros are preserved group-wise, e.g. "000111" -> "07".

    Raises:
        ValueError: if the string is empty or contains non-binary characters.

    Bug fix: the mangled body read the undefined names ``bin_string`` /
    iterated the whole input inside the per-group loop, so the padding loop
    never terminated and the digit computation used the wrong variable.
    """
    if not all(char in """01""" for char in __UpperCamelCase):
        raise ValueError("""Non-binary value was passed to the function""")
    if not __UpperCamelCase:
        raise ValueError("""Empty string was passed to the function""")
    oct_string = ""
    bin_string = __UpperCamelCase
    # Left-pad so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            # Weight each bit by its power of two within the 3-bit group.
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 701 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a_:
    """Circular FIFO queue backed by a pre-allocated circular doubly linked list.

    A node whose ``data`` is ``None`` counts as an empty slot.

    Bug fix: every method was named ``A__`` (so earlier definitions were
    shadowed), every local collapsed to ``UpperCamelCase``, and the node class
    below was also named ``a_`` — shadowing this queue class and leaving the
    ``Node()`` calls here unresolved.  The intended names are restored from
    the call sites (``create_linked_list``, ``is_empty``, ``Node``, ...).
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None  # oldest occupied slot
        self.rear = None   # newest occupied slot
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Allocate ``initial_capacity`` nodes and link them into a ring."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # Close the ring: last node links back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        """Empty when front and rear coincide and that slot holds no data."""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the oldest element without removing it.

        Raises:
            Exception: "Empty Queue" if the queue is empty.
        """
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store ``data`` in the next free slot.

        Raises:
            Exception: "Full Queue" if no free slot remains.
        """
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            # Advance rear only when the queue already holds elements.
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the oldest element.

        Raises:
            Exception: "Empty Queue" if the queue is empty.
        """
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it but keep the pointers in place.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")


class Node:
    """Slot of the circular queue: a payload plus links to both neighbours."""

    def __init__(self) -> None:
        self.data = None
        self.next = None
        self.prev = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35 | 0 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively.

    Renamed from ``lowercase__``: the module defined two functions with that
    name (the second shadowed this one) while the converter below calls
    ``binary_recursive``.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def lowercase__(__UpperCamelCase) -> str:
    """Convert a decimal string (optionally signed / padded) to "0b..." binary.

    Raises:
        ValueError: if the input is empty or not an integer.

    Bug fixes: the body read the undefined name ``number``, and the recursion
    was fed the raw (possibly negative) input — ``divmod`` on a negative int
    never reaches the base case.  The sign-stripped magnitude is used instead.
    """
    number = str(__UpperCamelCase).strip()
    if not number:
        raise ValueError("""No input value was provided""")
    negative = """-""" if number.startswith("""-""") else """"""
    number = number.lstrip("""-""")
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""")
    return F"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 301 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a_:
    """Circular FIFO queue backed by a pre-allocated circular doubly linked list.

    A node whose ``data`` is ``None`` counts as an empty slot.

    Bug fix: every method was named ``A__`` (so earlier definitions were
    shadowed), every local collapsed to ``UpperCamelCase``, and the node class
    below was also named ``a_`` — shadowing this queue class and leaving the
    ``Node()`` calls here unresolved.  The intended names are restored from
    the call sites (``create_linked_list``, ``is_empty``, ``Node``, ...).
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None  # oldest occupied slot
        self.rear = None   # newest occupied slot
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Allocate ``initial_capacity`` nodes and link them into a ring."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # Close the ring: last node links back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        """Empty when front and rear coincide and that slot holds no data."""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the oldest element without removing it.

        Raises:
            Exception: "Empty Queue" if the queue is empty.
        """
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store ``data`` in the next free slot.

        Raises:
            Exception: "Full Queue" if no free slot remains.
        """
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            # Advance rear only when the queue already holds elements.
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the oldest element.

        Raises:
            Exception: "Empty Queue" if the queue is empty.
        """
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it but keep the pointers in place.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")


class Node:
    """Slot of the circular queue: a payload plus links to both neighbours."""

    def __init__(self) -> None:
        self.data = None
        self.next = None
        self.prev = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 301 | 1 |
from __future__ import annotations
def _lowerCamelCase ( _a , _a , _a ):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 |
from maths.prime_factors import prime_factors
def _lowerCamelCase ( _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
_lowerCamelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_a )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(_a ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow-extracted rows/columns/batches into torch tensors.

    Bug fixes versus the mangled original: every method was named
    ``snake_case_`` (shadowing one another, so the internal calls to
    ``_consolidate`` / ``_tensorize`` / ``recursive_tensorize`` could never
    resolve); ``torch.intaa`` / ``torch.floataa`` are not torch attributes
    (intended defaults: ``torch.int64`` / ``torch.float32``); the
    object-dtype branch tensorized the *whole* array for each element
    (infinite recursion) instead of each ``substruct``; and the passthrough
    check used ``type(value)`` instead of ``type(None)``.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape, same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor (or pass it through)."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # Caller-supplied kwargs override the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        """Tensorize every leaf of a nested mapping, preserving list nesting."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 40 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 243 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
A__ = jnp.ones((batch_size, length)) / length
return scores
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
'''simple docstring'''
A__ = None
A__ = 20
A__ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__)
# tweak scores to not be uniform anymore
A__ = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
A__ = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
A__ = jax.nn.softmax(UpperCAmelCase__ , axis=-1)
A__ = FlaxTemperatureLogitsWarper(temperature=0.5)
A__ = FlaxTemperatureLogitsWarper(temperature=1.3)
A__ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__) , axis=-1)
A__ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
A__ = None
A__ = 10
A__ = 2
# create ramp distribution
A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
A__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
A__ = FlaxTopKLogitsWarper(3)
A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
A__ = 5
A__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, length)).copy()
A__ = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = None
A__ = 10
A__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
A__ = FlaxTopPLogitsWarper(0.8)
A__ = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3))
# check edge cases with negative and extreme logits
A__ = np.broadcast_to(np.arange(UpperCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
A__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = 20
A__ = 4
A__ = 0
A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__)
# check that min length is applied at length 5
A__ = ids_tensor((batch_size, 20) , vocab_size=20)
A__ = 5
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''')])
# check that min length is not applied anymore at length 15
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = 15
A__ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertFalse(jnp.isinf(UpperCAmelCase__).any())
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = 20
A__ = 4
A__ = 0
A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__)
# check that all scores are -inf except the bos_token_id score
A__ = ids_tensor((batch_size, 1) , vocab_size=20)
A__ = 1
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A__ = 3
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertFalse(jnp.isinf(UpperCAmelCase__).any())
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
A__ = 20
A__ = 4
A__ = 0
A__ = 5
A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
A__ = ids_tensor((batch_size, 4) , vocab_size=20)
A__ = 4
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A__ = 3
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
self.assertFalse(jnp.isinf(UpperCAmelCase__).any())
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = 4
A__ = 10
A__ = 15
A__ = 2
A__ = 1
A__ = 15
# dummy input_ids and scores
A__ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__)
A__ = input_ids.copy()
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scores.copy()
# instantiate all dist processors
A__ = FlaxTemperatureLogitsWarper(temperature=0.5)
A__ = FlaxTopKLogitsWarper(3)
A__ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__)
A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__)
A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__)
A__ = 10
# no processor list
A__ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
# with processor list
A__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = 4
A__ = 10
A__ = 15
A__ = 2
A__ = 1
A__ = 15
# dummy input_ids and scores
A__ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__)
A__ = input_ids.copy()
A__ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scores.copy()
# instantiate all dist processors
A__ = FlaxTemperatureLogitsWarper(temperature=0.5)
A__ = FlaxTopKLogitsWarper(3)
A__ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
A__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__)
A__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__)
A__ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__)
A__ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]):
A__ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
A__ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
return scores
# with processor list
def run_processor_list(UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]):
A__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
A__ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__)
return scores
A__ = jax.jit(UpperCAmelCase__)
A__ = jax.jit(UpperCAmelCase__)
A__ = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 177 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase_(unittest.TestCase):
    """Unit tests for the Flax logits processors and warpers.

    Reconstructed from an obfuscated version in which the helper had two
    parameters with the same name (a SyntaxError), every method shared one
    name (so unittest could only ever run the last one), and all locals were
    collapsed into a single placeholder (`A__`), producing NameErrors.
    """

    def _get_uniform_logits(self, batch_size, length):
        # Uniform distribution over `length` tokens for every batch row.
        return jnp.ones((batch_size, length)) / length

    def test_temperature_dist_warper(self):
        """Sharpening/smoothing temperatures move peaks/valleys the right way."""
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        """Top-k keeps exactly k tokens, and min_tokens_to_keep overrides k."""
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        """Top-p keeps the smallest set of tokens whose mass is >= top_p."""
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        """EOS is masked to -inf before min_length, untouched afterwards."""
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        """BOS is forced at step 1 and the processor is a no-op afterwards."""
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        """EOS is forced only once max_length is reached."""
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        """Applying processors one-by-one equals applying them via FlaxLogitsProcessorList."""
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        """Same equivalence as test_processor_list, but under `jax.jit`."""
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 177 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCAmelCase_(PipelineTool):
    """Tool that reads an English text out loud with SpeechT5 + HiFi-GAN.

    Reconstructed from an obfuscated version: the base class `__lowercase` was
    undefined, the PipelineTool hook methods all shared one (mangled) name,
    and the class attributes the framework reads had lost their names.
    """

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        # Default the vocoder checkpoint before the base class instantiates it.
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize the text and pick a default speaker embedding if none is given."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            # index 7305 is a fixed reference speaker from the x-vector dataset
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        # Run the vocoder and hand back a detached CPU waveform.
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 14 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# NOTE(review): the obfuscation collapsed two distinct module names into `a__`;
# the usage string below is immediately rebound by the choice list.
a__ = '''Usage of script: script_name <size_of_canvas:int>'''

# 100 dead cells (0) and 10 live cells (1), shuffled to seed a random board.
a__ = [0] * 100 + [1] * 10
random.shuffle(a__)  # was `random.shuffle(choice)` — NameError, `choice` never existed
def __UpperCAmelCase ( __a : int ) -> list[list[bool]]:
    """Return an __a x __a grid of dead cells (all False).

    Fixes the original, which built the grid into a throwaway local (`_a`)
    and then returned the undefined name `canvas`.
    """
    return [[False] * __a for _ in range(__a)]
def __UpperCAmelCase ( __a : list[list[bool]] ) -> None:
    """Randomize every cell of the canvas in place.

    Fixes two defects in the original: the inner loop enumerated the whole
    canvas instead of the current row, and the random bit was discarded into
    a local (`_a`) instead of being written back into the canvas.
    """
    for i, row in enumerate(__a):
        for j, _ in enumerate(row):
            __a[i][j] = bool(random.getrandbits(1))
def __UpperCAmelCase ( __a : list[list[bool]] ) -> list[list[bool]]:
    """Compute one Game of Life generation and return it as a list of lists.

    Fixes the original, which called the undefined `__judge_point` and
    `create_canvas`, discarded each judged cell into a local (`_a`), and
    produced wrong neighbourhoods on the top/left edges (negative slice
    starts wrap around in NumPy). The Conway rules are inlined here so the
    function is self-contained.
    """
    grid = np.array(__a)
    rows, cols = grid.shape
    next_gen: list[list[bool]] = [[False] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            # clamp the window start at 0 so edge cells do not wrap around
            window = grid[max(r - 1, 0) : r + 2, max(c - 1, 0) : c + 2]
            alive = int(window.sum()) - int(grid[r, c])  # exclude the focus cell
            if grid[r, c]:
                # survival: exactly 2 or 3 live neighbours
                next_gen[r][c] = alive in (2, 3)
            else:
                # birth: exactly 3 live neighbours
                next_gen[r][c] = alive == 3
    return next_gen
def __UpperCAmelCase ( pt: bool, neighbours: list[list[bool]] ) -> bool:
    """Apply Conway's rules to one focus cell given its 3x3 neighbourhood.

    Fixes the original, whose two parameters shared one name (`__a`, a
    SyntaxError) and whose alive/dead counters were assigned to a throwaway
    local (`_a`) and then read under their intended names.
    """
    # count live cells in the whole neighbourhood, then remove the focus cell
    alive = sum(1 for row in neighbours for status in row if status)
    if pt:
        alive -= 1
        # survival: a live cell with 2 or 3 live neighbours stays alive
        return alive == 2 or alive == 3
    # birth: a dead cell with exactly 3 live neighbours comes alive
    return alive == 3
if __name__ == "__main__":
    # NOTE(review): this entry point is broken as written — the obfuscation pass
    # collapsed every module-level name into `a__`, so `usage_doc`,
    # `create_canvas`, `canvas_size`, `seed`, `c`, `run`, `fig`, `ax` and `cmap`
    # are all undefined (NameError at runtime). Restoring it requires giving the
    # four `__UpperCAmelCase` functions distinct names first.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)  # NOTE(review): `usage_doc` undefined; the string lives in `a__` above
    a__ = int(sys.argv[1])
    # main working structure of this module.
    a__ = create_canvas(canvas_size)  # NOTE(review): `create_canvas`/`canvas_size` undefined
    seed(c)  # NOTE(review): `seed` and `c` undefined
    a__ , a__ = plt.subplots()
    fig.show()  # NOTE(review): `fig`/`ax` were clobbered into `a__` by the tuple assignment above
    a__ = ListedColormap(['''w''', '''k'''])
    try:
        # redraw the board every generation until the user interrupts
        while True:
            a__ = run(c)  # NOTE(review): `run` and `c` undefined
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 14 | 1 |
def UpperCamelCase_( _A :str )-> bool:
    """Return True if `_A` is a valid dotted-quad IPv4 address.

    Fixes two defects in the original: the upper octet bound was 254 (valid
    octets go up to 255, e.g. 255.255.255.255), and non-digit parts were
    silently filtered out *before* the length check, so strings such as
    "1.2.3.4.x" could slip through as four valid octets.
    """
    octets = _A.split(".")
    if len(octets) != 4:
        return False
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)
if __name__ == "__main__":
    # Read one address from stdin, classify it, and report the result.
    # Fixes the original, which called the undefined `is_ip_va_address_valid`
    # (the validator above is named `UpperCamelCase_`) and formatted the
    # undefined names `ip`/`valid_or_invalid` after both values had been
    # clobbered into `__UpperCamelCase`.
    ip = input().strip()
    valid_or_invalid = "valid" if UpperCamelCase_(ip) else "invalid"
    print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 185 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds tiny MegatronBert configs/inputs and exercises every model head.

    Reconstructed from an obfuscated version whose `__init__` repeated the
    parameter name `snake_case` (a SyntaxError), whose attribute assignments
    were all lost into a local placeholder (`UpperCamelCase__`), and whose
    methods all shared one name. The original class name is kept as an alias
    at the end of the class definition for backward compatibility.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate each example once per choice: (batch, seq) -> (batch, num_choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backward-compatible alias for the original (obfuscated) class name.
lowerCamelCase__ = MegatronBertModelTester
@require_torch
class lowerCamelCase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for all MegatronBert heads.

    Reconstructed from an obfuscated version: the mixin base classes were the
    undefined name `UpperCAmelCase`, the attributes the mixins read
    (`all_model_classes`, `pipeline_model_mapping`, ...) had lost their names,
    and every test method shared one name so only the last was collected.
    """

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source only shows a bare True/False pair here;
    # True/False are mapped to the two flags below per the surviving comment.
    fx_compatible = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # pretraining heads additionally need MLM labels and a next-sentence label
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def UpperCamelCase_(_A) -> "torch.Tensor":
    """Return a LongTensor built from the (nested) list of token ids in ``_A``.

    NOTE(review): the original passed the token list itself as ``device=`` as
    well, which cannot work (``device`` expects a torch.device/str); the tensor
    is now created on the default device.
    """
    return torch.tensor(_A, dtype=torch.long)
# Tolerance used by the integration test below when comparing half-precision
# model outputs against the hard-coded reference values.
__UpperCamelCase = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Integration check for the pretrained nvidia/megatron-bert-uncased-345m checkpoint."""

    @slow
    @unittest.skip("Model is not available." )
    def snake_case__ ( self ):
        """Run the 345M checkpoint on a short prompt and compare a 3x3 slice of
        the hidden states against hard-coded reference values."""
        checkpoint = "nvidia/megatron-bert-uncased-345m"
        # Allow pointing at a local copy of the checkpoint via $MYDIR.
        if "MYDIR" in os.environ:
            checkpoint = os.path.join(os.environ["MYDIR"], checkpoint)
        model = MegatronBertModel.from_pretrained(checkpoint)
        # NOTE(review): the garbled copy called model.to(<undefined name>); the
        # original presumably moved the model to the accelerator device — confirm
        # against the file's imports. Running on the default device is still valid.
        model.half()
        # [CLS] who ' s this guy ? [SEP] tokenized — 9 tokens.
        input_ids = UpperCamelCase_([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                # 1e-4 is the module-level tolerance for fp16 comparisons.
                self.assertTrue(math.isclose(a, b, rel_tol=1e-4, abs_tol=1e-4), msg=msg)
| 185 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Return the greatest common divisor of ``x`` and ``y`` (recursive Euclid).

    NOTE(review): the garbled copy recursed on ``(x, x % y)``, which is wrong —
    e.g. it would return 4 for gcd(4, 6). Euclid's step is ``(y, x % y)``.
    The name is restored to match the call sites below.
    """
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    """Return the least common multiple of ``x`` and ``y``.

    Name restored to ``lcm`` — the solution loop below calls it by that name,
    and the garbled copy's ``__UpperCamelCase`` collided with its siblings.
    """
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n.

    NOTE(review): the garbled loop body folded two undefined names into lcm;
    the running accumulator must be combined with the loop index.
    """
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
    # Print the Project Euler 5 answer (smallest number divisible by 1..20).
    print(f'''{solution() = }''')
| 235 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase_ = """src/transformers"""
# Matches is_xxx_available()
lowercase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowercase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Return the backend(s) required by ``line`` of an init, or None.

    Multiple backends on one line are joined with "_and_" in sorted order.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse one ``__init__.py`` with a lazy import structure.

    Returns a pair of dicts mapping backend name ("none" for unconditional
    objects) to the list of object names registered under it:
    ``(objects in _import_structure, objects imported under TYPE_CHECKING)``.
    Returns ``None`` for traditional inits without an ``_import_structure``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            # Strip the 8-space indent, quotes and trailing ",\n".
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an "if not is_backend_available", grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part; first grab the objects
    # without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an "if is_backend_available", grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error messages.

    ``import_dict_objects`` / ``type_hint_objects`` map backend name to the
    object names registered under it (as produced by ``parse_init``).
    """

    def find_duplicates(seq):
        # Names registered more than once under the same backend.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk PATH_TO_TRANSFORMERS and raise ValueError if any ``__init__.py``
    registers different objects in ``_import_structure`` vs TYPE_CHECKING."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the dotted names of the submodules found under PATH_TO_TRANSFORMERS."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules (and prune them from the walk).
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only top-level modules are registered in the main init.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are intentionally absent from the main init's
# ``_import_structure``. Name restored from the use-site in check_submodules.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Raise ValueError if a submodule on disk is missing from the main init's
    ``_import_structure`` (unless explicitly ignored)."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # Validate every __init__.py, then the top-level submodule registration.
    check_all_inits()
    check_submodules()
| 235 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure for the GPT-Neo model family. The garbled copy bound
# every assignment to the throwaway name ``A`` while the _LazyModule call at
# the bottom reads ``_import_structure``; names restored accordingly, and the
# lazy module is installed into sys.modules so imports actually resolve.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
from __future__ import annotations
def lowercase_(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher with the chi-squared statistic.

    Tries every shift over the alphabet, scores each candidate plaintext by
    how closely its letter frequencies match English (or the supplied
    ``frequencies_dict``), and returns the triple
    ``(best shift, its chi-squared value, decoded text)``.

    NOTE(review): the garbled copy declared every parameter as ``_A`` (a
    SyntaxError — duplicate argument) and referenced undefined locals; the
    parameter and local names are reconstructed from the surviving comments.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08_497,
            "b": 0.01_492,
            "c": 0.02_202,
            "d": 0.04_253,
            "e": 0.11_162,
            "f": 0.02_228,
            "g": 0.02_015,
            "h": 0.06_094,
            "i": 0.07_546,
            "j": 0.00_153,
            "k": 0.01_292,
            "l": 0.04_025,
            "m": 0.02_406,
            "n": 0.06_749,
            "o": 0.07_507,
            "p": 0.01_929,
            "q": 0.00_095,
            "r": 0.07_587,
            "s": 0.06_327,
            "t": 0.09_356,
            "u": 0.02_758,
            "v": 0.00_978,
            "w": 0.02_560,
            "x": 0.00_150,
            "y": 0.01_994,
            "z": 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values: shift -> (statistic, decoded text)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 5 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.