code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance; the sqrt is deferred to the very end."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) pass, used for the small base cases."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Inside the strip each point only needs to be checked against 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points))) | 76 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 150 | 0 |
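A minimal usage sketch for the `CircularLinkedList` reconstructed above; method names follow that reconstruction of the mangled dump, not independent documentation:

```python
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)        # 1->2->3, with tail.next wrapping back to head
cll.insert_head(0)                # 0->1->2->3
assert repr(cll) == "0->1->2->3"
assert cll.delete_front() == 0    # head advances and tail.next is re-linked
assert cll.delete_tail() == 3
assert len(cll) == 2
```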
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def lowercase_ ( self : str ) -> Union[str, Any]:
return 32
@property
def lowercase_ ( self : int ) -> Any:
return 32
@property
def lowercase_ ( self : Tuple ) -> Dict:
return self.time_input_dim
@property
def lowercase_ ( self : Tuple ) -> Any:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Optional[Any] ) -> Dict:
return 100
@property
def lowercase_ ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowerCamelCase )
@property
def lowercase_ ( self : Any ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__lowerCamelCase )
@property
def lowercase_ ( self : Tuple ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
SCREAMING_SNAKE_CASE__ = UnCLIPTextProjModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Any ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Union[str, Any] ) -> Tuple:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase_ ( self : str ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def lowercase_ ( self : int ) -> List[Any]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
SCREAMING_SNAKE_CASE__ = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def lowercase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.dummy_decoder
SCREAMING_SNAKE_CASE__ = self.dummy_text_proj
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_super_res_first
SCREAMING_SNAKE_CASE__ = self.dummy_super_res_last
SCREAMING_SNAKE_CASE__ = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE__ = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
SCREAMING_SNAKE_CASE__ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase_ ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : List[str]=True ) -> Any:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(__lowerCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(
**__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(
**__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
SCREAMING_SNAKE_CASE__ = pipe(
**__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = torch.device('''cpu''' )
        class DummyScheduler:
            init_noise_sigma = 1
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe.decoder.dtype
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
SCREAMING_SNAKE_CASE__ = pipe.prepare_latents(
__lowerCamelCase , dtype=__lowerCamelCase , device=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
SCREAMING_SNAKE_CASE__ = pipe.prepare_latents(
__lowerCamelCase , dtype=__lowerCamelCase , device=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(
**__lowerCamelCase , decoder_latents=__lowerCamelCase , super_res_latents=__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase , pil_image=__lowerCamelCase )
# Don't pass image, instead pass embedding
SCREAMING_SNAKE_CASE__ = pipeline_inputs.pop('''image''' )
SCREAMING_SNAKE_CASE__ = pipe.image_encoder(__lowerCamelCase ).image_embeds
SCREAMING_SNAKE_CASE__ = pipe(
**__lowerCamelCase , decoder_latents=__lowerCamelCase , super_res_latents=__lowerCamelCase , image_embeddings=__lowerCamelCase , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def lowercase_ ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
SCREAMING_SNAKE_CASE__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__lowerCamelCase , expected_max_diff=__lowerCamelCase )
@skip_mps
def lowercase_ ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = torch_device == '''cpu'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , additional_params_copy_to_batched_inputs=__lowerCamelCase , )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
SCREAMING_SNAKE_CASE__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__lowerCamelCase , additional_params_copy_to_batched_inputs=__lowerCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__lowerCamelCase )
@skip_mps
def lowercase_ ( self : Tuple ) -> str:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase_ ( self : int ) -> Dict:
return super().test_save_load_local()
@skip_mps
def lowercase_ ( self : int ) -> Any:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
SCREAMING_SNAKE_CASE__ = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipeline(
__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase , 15 )
| 218 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://unbabel.github.io/COMET/html/index.html',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'sources': datasets.Value('string', id='sequence'),
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/Unbabel/COMET'],
            reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 218 | 1 |
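One step in `_compute` above is easy to misread: the dict of parallel lists is transposed into one dict per segment before being handed to the COMET scorer. A small worked example of that reshaping, reusing the sentences from the metric's own doctest:

```python
# Transposition used in _compute: dict-of-lists -> list-of-dicts.
data = {"src": ["Dem Feuer konnte Einhalt geboten werden"],
        "mt": ["The fire could be stopped"],
        "ref": ["They were able to control the fire."]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]
assert rows == [{"src": "Dem Feuer konnte Einhalt geboten werden",
                 "mt": "The fire could be stopped",
                 "ref": "They were able to control the fire."}]
```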
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , )
_lowerCAmelCase : Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
_lowerCAmelCase : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_zero=snake_case_ , )
torch.manual_seed(0 )
_lowerCAmelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
_lowerCAmelCase : Optional[int] = CLIPTextModel(snake_case_ )
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : int = floats_tensor((1, 1_6, 1_6) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : int = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Tuple = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : List[str] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : str = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Optional[Any] = Image.fromarray(np.uint8(snake_case_ ) ).convert("""RGB""" )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Dict = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : str = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : int = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : str = Image.fromarray(np.uint8(snake_case_ ) ).convert("""RGB""" )
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : List[str] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : List[str] = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(snake_case_ , snake_case_ , snake_case_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowerCAmelCase : str = self.get_dummy_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe(**snake_case_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case_ )
_lowerCAmelCase : List[str] = self.pipeline_class.from_pretrained(snake_case_ )
pipe_loaded.to(snake_case_ )
pipe_loaded.set_progress_bar_config(disable=snake_case_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case_ , snake_case_ ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe_loaded(**snake_case_ )[0]
_lowerCAmelCase : str = np.abs(output - output_loaded ).max()
self.assertLess(snake_case_ , 1E-4 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(snake_case_ )
_lowerCAmelCase : int = pipe.generate_mask(**snake_case_ )
_lowerCAmelCase : Optional[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
_lowerCAmelCase : str = np.array([0] * 9 )
_lowerCAmelCase : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = """cpu"""
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Tuple = self.get_dummy_inversion_inputs(snake_case_ )
_lowerCAmelCase : List[Any] = pipe.invert(**snake_case_ ).images
_lowerCAmelCase : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCAmelCase : Optional[int] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
_lowerCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
def __UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """cpu"""
_lowerCAmelCase : str = self.get_dummy_components()
_lowerCAmelCase : Tuple = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler(**snake_case_ )
_lowerCAmelCase : Tuple = DPMSolverMultistepInverseScheduler(**snake_case_ )
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inversion_inputs(snake_case_ )
_lowerCAmelCase : Union[str, Any] = pipe.invert(**snake_case_ ).images
_lowerCAmelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCAmelCase : Tuple = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
_lowerCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
def __UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __UpperCamelCase ( cls ):
_lowerCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
_lowerCAmelCase : int = raw_image.convert("""RGB""" ).resize((7_6_8, 7_6_8) )
_lowerCAmelCase : Optional[Any] = raw_image
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : str = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=snake_case_ , torch_dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : str = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = """a bowl of fruit"""
_lowerCAmelCase : Optional[Any] = """a bowl of pears"""
_lowerCAmelCase : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case_ , target_prompt=snake_case_ , generator=snake_case_ , )
_lowerCAmelCase : Optional[int] = pipe.invert(
prompt=snake_case_ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case_ ).latents
_lowerCAmelCase : Optional[int] = pipe(
prompt=snake_case_ , mask_image=snake_case_ , image_latents=snake_case_ , generator=snake_case_ , negative_prompt=snake_case_ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
_lowerCAmelCase : int = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=snake_case_ , torch_dtype=torch.floataa )
_lowerCAmelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = """a bowl of fruit"""
_lowerCAmelCase : List[str] = """a bowl of pears"""
_lowerCAmelCase : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=snake_case_ , target_prompt=snake_case_ , generator=snake_case_ , )
_lowerCAmelCase : Union[str, Any] = pipe.invert(
prompt=snake_case_ , image=self.raw_image , inpaint_strength=0.7 , generator=snake_case_ , num_inference_steps=2_5 , ).latents
_lowerCAmelCase : List[str] = pipe(
prompt=snake_case_ , mask_image=snake_case_ , image_latents=snake_case_ , generator=snake_case_ , negative_prompt=snake_case_ , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type="""numpy""" , ).images[0]
_lowerCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 309 |
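Condensed from the slow tests above, the DiffEdit flow has three stages: mask generation, DDIM inversion, and masked generation. The sketch below mirrors the model id and arguments those tests use rather than a canonical recipe; `raw_image` stands for the 768x768 RGB PIL image prepared in `setUpClass`:

```python
import torch
from diffusers import StableDiffusionDiffEditPipeline

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

# raw_image: a 768x768 RGB PIL image (see setUpClass above)
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit",
                          target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image,
                      inpaint_strength=0.7).latents
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
              inpaint_strength=0.7).images[0]
```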
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 309 | 1 |
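A hypothetical driver for the `TextToSpeechTool` above. `PipelineTool.__call__` chains `encode` -> `forward` -> `decode`, so invoking the tool directly is enough; the import path is an assumption for illustration:

```python
from transformers.tools import TextToSpeechTool  # import path assumed

reader = TextToSpeechTool()
waveform = reader("Hello from the text reader tool.")
# `waveform` is a float tensor; write it out with e.g. soundfile at 16 kHz.
```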
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n (every divisor except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 38 |
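A worked check of the amicable-number logic above, using the classic pair (220, 284):

```python
assert sum_of_divisors(220) == 284   # 1+2+4+5+10+11+20+22+44+55+110 = 284
assert sum_of_divisors(284) == 220   # 1+2+4+71+142 = 220
# 220 satisfies the filter in solution(): sum_of_divisors(sum_of_divisors(220)) == 220
# while sum_of_divisors(220) != 220, so it counts as amicable rather than perfect.
```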
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
| 38 | 1 |
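The `_pad` override above is the LED-specific part: `global_attention_mask` is padded with -1 (local attention) so it keeps the same length as the padded `input_ids`. A sketch of how that surfaces through the public API; the checkpoint id matches the pretrained map above:

```python
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok("a long document", padding="max_length", max_length=8)
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attn on <s>
padded = tok.pad(enc, padding="max_length", max_length=12)
# padded["global_attention_mask"][-4:] == [-1, -1, -1, -1], added by _pad() above
```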
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 194 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hide():
    """Hide the terminal cursor for the duration of the block."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 199 | 0 |
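Typical use of the `hide()` context manager reconstructed above: the cursor stays hidden while a progress indicator redraws in place, and `show_cursor()` runs even if the body raises:

```python
import time

with hide():
    for frame in "|/-\\" * 5:
        print(f"\rworking {frame}", end="", flush=True)
        time.sleep(0.05)
print("\rdone      ")
```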
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 351 |
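A worked example for `electric_conductivity` above: the three quantities are tied by sigma = n * e * mu, so zeroing exactly one of them asks the function to solve for it:

```python
name, value = electric_conductivity(conductivity=0, electron_conc=2.5e19, mobility=0.14)
assert name == "conductivity"
# value = 2.5e19 * 1.6021e-19 * 0.14 ≈ 0.5607 S/m
```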
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 306 | 0 |
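A trial-division walk-through for `prime_factors` above:

```python
# 360 -> 2*180 -> 2*90 -> 2*45 -> 3*15 -> 3*5, leaving n = 5 > 1 for the final append.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(1) == []      # the loop never runs and n == 1 is dropped
assert prime_factors(97) == [97]   # a prime survives the loop untouched
```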
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model
@property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model
@property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case: List[Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__snake_case: str = DDPMScheduler()
__snake_case: Optional[Any] = AudioDiffusionPipeline(vqvae=A , unet=self.dummy_unet , mel=A , scheduler=A )
__snake_case: str = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__snake_case: str = torch.Generator(device=A ).manual_seed(42 )
__snake_case: Dict = pipe(generator=A , steps=4 )
__snake_case: Optional[Any] = output.audios[0]
__snake_case: Optional[Any] = output.images[0]
__snake_case: Tuple = torch.Generator(device=A ).manual_seed(42 )
__snake_case: Dict = pipe(generator=A , steps=4 , return_dict=A )
__snake_case: Any = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__snake_case: Any = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
__snake_case: Optional[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
__snake_case: Any = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__snake_case: Optional[Any] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__snake_case: Tuple = DDIMScheduler()
__snake_case: int = self.dummy_vqvae_and_unet
__snake_case: List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A , scheduler=A )
__snake_case: Dict = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
np.random.seed(0 )
__snake_case: Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__snake_case: str = torch.Generator(device=A ).manual_seed(42 )
__snake_case: Dict = pipe(raw_audio=A , generator=A , start_step=5 , steps=10 )
__snake_case: List[Any] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__snake_case: Optional[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
__snake_case: Any = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__snake_case: List[Any] = self.dummy_unet_condition
__snake_case: Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=A , mel=A , scheduler=A )
__snake_case: Optional[int] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
np.random.seed(0 )
__snake_case: Any = torch.rand((1, 1, 10) )
__snake_case: Optional[Any] = pipe(generator=A , encoding=A )
__snake_case: Union[str, Any] = output.images[0]
__snake_case: Optional[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
__snake_case: int = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: str = torch_device
__snake_case: Optional[int] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__snake_case: List[Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__snake_case: Dict = torch.Generator(device=A ).manual_seed(42 )
__snake_case: List[str] = pipe(generator=A )
__snake_case: Optional[int] = output.audios[0]
__snake_case: Optional[int] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__snake_case: str = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
__snake_case: Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 111 |
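For context, a minimal sketch of driving the pipeline exercised by the slow test above (same public checkpoint; the device and seed mirror the test, everything else is illustrative):

# Hedged usage sketch for the audio diffusion pipeline tested above.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cpu")
generator = torch.Generator(device="cpu").manual_seed(42)
output = pipe(generator=generator)
audio = output.audios[0]  # waveform of shape (1, samples)
image = output.images[0]  # mel spectrogram rendered as a PIL image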
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 | 1 |
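The snippet above registers BartphoTokenizer behind a lazy module so that sentencepiece is only imported on first use. A self-contained toy illustration of the same idea (not the transformers implementation):

# Toy lazy module: imports a submodule only when one of its attributes is touched.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> fully qualified module that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(module_name), attr)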
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCAmelCase ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
snake_case_ = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
snake_case_ = datasets.load_dataset(UpperCAmelCase , UpperCAmelCase )
if save_dir is None:
snake_case_ = f'{dataset}-{pair}'
snake_case_ = Path(UpperCAmelCase )
save_dir.mkdir(exist_ok=UpperCAmelCase )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
snake_case_ = 'val' if split == 'validation' else split
snake_case_ = save_dir.joinpath(f'{fn}.source' )
snake_case_ = save_dir.joinpath(f'{fn}.target' )
snake_case_ = src_path.open('w+' )
snake_case_ = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case_ = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
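# Example invocation: fire exposes download_wmt_dataset as a CLI whose flags
# mirror its keyword arguments (the script filename below is illustrative):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# This writes {train,val,test}.source and {train,val,test}.target under save_dir.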
| 312 | """simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    __UpperCamelCase = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312 | 1 |
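The script above is driven from the command line, e.g. python convert_ldm_checkpoint.py --checkpoint_path model.ckpt --config_file config.json --dump_path converted_model (the script filename is illustrative; the flags come straight from the argparse definition). A hedged sketch of consuming the result:

# The script saves a full LDMPipeline when a scheduler config and VQModel sit
# next to the checkpoint, and falls back to a bare UNet2DModel otherwise.
from diffusers import LDMPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("converted_model")    # bare-model fallback
# pipe = LDMPipeline.from_pretrained("converted_model")  # pipeline case, if saved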
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 31 | '''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    """simple docstring"""

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(stra, strb)
    matching_2 = get_matched_characters(strb, stra)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1, matching_2) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 31 | 1 |
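A quick worked check of the function above (values traced by hand through this implementation; martha/marhta is the classic textbook pair):

# Spot checks for jaro_winkler as defined above (run alongside that definition).
print(jaro_winkler("hello", "hello"))    # 1.0: identical strings
print(jaro_winkler("martha", "marhta"))  # ~0.9611: one transposition, 3-char common prefix
print(jaro_winkler("hello", "world"))    # ~0.4667: only 'l' matches within the window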
from math import sqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
| 120 |
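This is Project Euler problem 7; a few sanity checks of the helpers above:

# Sanity checks for is_prime/solution as defined above.
assert is_prime(2) and is_prime(3) and is_prime(13)
assert not is_prime(1) and not is_prime(9)
assert solution(6) == 13        # the first six primes are 2, 3, 5, 7, 11, 13
assert solution() == 104743     # the 10001st prime (known Project Euler #7 answer)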
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """simple docstring"""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """simple docstring"""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """simple docstring"""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    """simple docstring"""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 120 | 1 |
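A worked round trip through the cipher above. Note the encryption rule is (plaintext - key) mod 26 rather than the textbook Vigenere addition, so decryption adds the key back; the ciphertext below was computed by hand and should be double-checked:

# Round-trip check for generate_key/cipher_text/original_text above.
key_new = generate_key("THE GERMAN ATTACK", "SECRET")  # -> "SECRETSECRETSECRE"
encrypted = cipher_text("THE GERMAN ATTACK", key_new)  # -> "BDC PAYUWL JPAIYI" (hand-computed)
assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"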
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """simple docstring"""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    """simple docstring"""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """simple docstring"""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """simple docstring"""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """simple docstring"""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """simple docstring"""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 70 |
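A hedged sketch of wiring the pieces above together (the stories directory is illustrative; any BERT-style tokenizer with a pad token works):

# Usage sketch for CNNDMDataset and the helpers above.
from transformers import BertTokenizer

dataset = CNNDMDataset("path/to/cnn_stories")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

name, story_lines, summary_lines = dataset[0]
story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
story_ids = truncate_or_pad(story_ids, 512, tokenizer.pad_token_id)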
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Optional[int] = {"vocab_file": "spiece.model"}
_lowercase : str = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_lowercase : Optional[Any] = {
"AI-Sweden/gpt-sw3-126m": 2_0_4_8,
"AI-Sweden/gpt-sw3-350m": 2_0_4_8,
"AI-Sweden/gpt-sw3-1.6b": 2_0_4_8,
"AI-Sweden/gpt-sw3-6.7b": 2_0_4_8,
"AI-Sweden/gpt-sw3-20b": 2_0_4_8,
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : List[str]=False, lowerCamelCase : str=False, lowerCamelCase : List[Any]=False, lowerCamelCase : List[str]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : List[str]=None, lowerCamelCase : Optional[int]=None, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Dict, )-> None:
lowerCamelCase__ : Any ={} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ : Optional[int] =kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' if you are testing the model, this can safely be ignored''' )
lowerCamelCase__ : List[Any] ='''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase__ : Tuple ='''<|endoftext|>''' if eos_token is None else eos_token
lowerCamelCase__ : Dict ='''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase__ : int =unk_token if pad_token is None else pad_token
lowerCamelCase__ : str =eos_token if bos_token is None else bos_token
else:
lowerCamelCase__ : Any ='''<pad>''' if pad_token is None else pad_token
lowerCamelCase__ : str ='''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase, remove_space=lowerCamelCase, keep_accents=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, pad_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =do_lower_case
lowerCamelCase__ : Union[str, Any] =remove_space
lowerCamelCase__ : int =keep_accents
lowerCamelCase__ : Tuple =vocab_file
lowerCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# Used for whitespace normalization in input texts
# fmt: off
lowerCamelCase__ : List[Any] ={''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCamelCase__ : int =re.compile(
F'''[{"".join(map(lowerCamelCase, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any )-> Optional[Any]:
lowerCamelCase__ : List[str] =self.__dict__.copy()
lowerCamelCase__ : List[Any] =None
return state
def __setstate__( self : Optional[Any], lowerCamelCase : Dict )-> int:
lowerCamelCase__ : List[str] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : Union[str, Any] ={}
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def snake_case ( self : List[str] )-> int:
return len(self.sp_model )
def snake_case ( self : List[str], lowerCamelCase : str )-> str:
lowerCamelCase__ : Dict =self.non_printing_characters_re.sub('''''', lowerCamelCase )
# Normalize whitespaces
lowerCamelCase__ : List[Any] =''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCamelCase__ : Union[str, Any] =unicodedata.normalize('''NFC''', lowerCamelCase )
return text
def snake_case ( self : int, lowerCamelCase : str, **lowerCamelCase : str )-> List[str]:
lowerCamelCase__ : int =self.preprocess_text(lowerCamelCase )
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : Any, lowerCamelCase : str )-> int:
return self.sp_model.PieceToId(lowerCamelCase )
def snake_case ( self : Optional[int], lowerCamelCase : int )-> str:
return self.sp_model.IdToPiece(lowerCamelCase )
@staticmethod
def snake_case ( lowerCamelCase : str )-> str:
return out_string
def snake_case ( self : List[Any], lowerCamelCase : List[str] )-> str:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ : int =''''''
lowerCamelCase__ : Any =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : Tuple =[]
else:
current_sub_tokens.append(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string
def snake_case ( self : Union[str, Any] )-> Dict[str, int]:
lowerCamelCase__ : Tuple ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : Dict =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def snake_case ( self : int, lowerCamelCase : Union[str, List[str]], lowerCamelCase : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : str =self.preprocess_text(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =self.sp_model.encode(lowerCamelCase )
else:
lowerCamelCase__ : int =[self.preprocess_text(lowerCamelCase ) for t in text]
lowerCamelCase__ : Tuple =self.sp_model.encode(lowerCamelCase )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase__ : Union[str, Any] =torch.tensor(lowerCamelCase )
return token_ids
def snake_case ( self : int, lowerCamelCase : Union[int, List[int]] )-> str:
return self.sp_model.decode(lowerCamelCase )
def snake_case ( self : Tuple, lowerCamelCase : "Conversation" )-> List[int]:
lowerCamelCase__ : Any =[F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
lowerCamelCase__ : str =(
F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(lowerCamelCase ) + F'''{self.bos_token}Bot:'''
)
return self.encode(text=lowerCamelCase )
| 238 | 0 |
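A minimal usage sketch for the tokenizer above (model id taken from its own vocab map; network access to the Hub is assumed):

# Hedged round trip with GPTSw3Tokenizer.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
print(tokenizer.decode(ids[0]))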
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''simple docstring'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed:
    # a process whose arrival time has passed and that has remaining
    # execution time is put into ready_process; the shortest process in
    # ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
_SCREAMING_SNAKE_CASE : Any = 4
_SCREAMING_SNAKE_CASE : List[Any] = [2, 5, 3, 7]
_SCREAMING_SNAKE_CASE : List[Any] = [0, 0, 0, 0]
_SCREAMING_SNAKE_CASE : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_SCREAMING_SNAKE_CASE : Tuple = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 92 |
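Despite the remaining_time bookkeeping, the scheduler above is effectively non-preemptive: once picked, a job runs to completion. A worked expectation for TEST CASE 01, traced by hand:

# With all arrivals at t=0 the run order is P1(2) -> P3(3) -> P2(5) -> P4(7):
#   waiting_time     = [0, 5, 2, 10]  -> mean 4.25
#   turn_around_time = [2, 10, 5, 17] -> mean 8.5
assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]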
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _snake_case :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , a__=1_000 , ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = tf.convert_to_tensor(a__ )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = TFLayoutLMModel(config=a__ )
snake_case_ = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
snake_case_ = model(a__ , a__ , token_type_ids=a__ )
snake_case_ = model(a__ , a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = TFLayoutLMForMaskedLM(config=a__ )
snake_case_ = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.num_labels
snake_case_ = TFLayoutLMForSequenceClassification(config=a__ )
snake_case_ = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
snake_case_ = self.num_labels
snake_case_ = TFLayoutLMForTokenClassification(config=a__ )
snake_case_ = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = TFLayoutLMForQuestionAnswering(config=a__ )
snake_case_ = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {
        "input_ids": input_ids,
        "bbox": bbox,
        "token_type_ids": token_type_ids,
        "attention_mask": input_mask,
    }
    return config, inputs_dict
@require_tf
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : List[Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : int = True
lowerCAmelCase_ : List[str] = 10
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = TFLayoutLMModelTester(self )
snake_case_ = ConfigTester(self , config_class=a__ , hidden_size=37 )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFLayoutLMModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase_( ):
'''simple docstring'''
# fmt: off
snake_case_ = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
snake_case_ = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the sequence output on [0, :3, :3]
snake_case_ = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-3 ) )
# test the pooled output on [1, :3]
snake_case_ = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a__ , atol=1e-3 ) )
@slow
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
snake_case_ = outputs.loss
snake_case_ = (2,)
self.assertEqual(loss.shape , a__ )
# test the shape of the logits
snake_case_ = outputs.logits
snake_case_ = (2, 2)
self.assertEqual(logits.shape , a__ )
@slow
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(
input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
# test the shape of the logits
snake_case_ = outputs.logits
snake_case_ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , a__ )
@slow
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case_ = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
# test the shape of the logits
snake_case_ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , a__ )
self.assertEqual(outputs.end_logits.shape , a__ )
| 92 | 1 |
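A minimal hedged sketch of calling the model exercised above; bounding boxes are (x0, y0, x1, y1) on LayoutLM's 0-1000 normalized page grid:

# Token ids and boxes below are illustrative; their shapes must agree.
import tensorflow as tf
from transformers import TFLayoutLMModel

model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids = tf.constant([[101, 1019, 102]])
bbox = tf.constant([[[0, 0, 0, 0], [423, 237, 440, 251], [1000, 1000, 1000, 1000]]])
outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # (1, 3, 768) for the base model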
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    '''simple docstring'''
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image`, which should be the original image, and `label`, which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 64 |
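A hedged sketch of invoking the tool above directly (the image path is illustrative; the checkpoint is downloaded on first use):

# PipelineTool instances are callable; encode/forward/decode run under the hood.
from PIL import Image

tool = ImageSegmentationTool()
mask = tool(image=Image.open("cat.png"), label="cat")
mask.save("cat_mask.png")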
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =CLIPTokenizer
_lowercase =CLIPTokenizerFast
_lowercase =True
_lowercase ={}
_lowercase =False
def __a ( self ) -> Dict:
super().setUp()
# fmt: off
lowerCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCamelCase ) )
def __a ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , **_UpperCamelCase ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowerCAmelCase_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase_ = "xa\u0303y" + " " + "x\xe3y"
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
lowerCAmelCase_ = f""" {text}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __a ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ) -> str:
super().test_tokenization_python_rust_equals()
def __a ( self ) -> Any:
# CLIP always lower cases letters
pass
| 231 | 0 |
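A small illustration of the behavior the tests above probe; CLIP lower-cases its input before BPE, so cased and uncased inputs tokenize identically:

# Hedged round trip with the public CLIP tokenizer.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
assert tokenizer.tokenize("Hello WORLD") == tokenizer.tokenize("hello world")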
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def create_and_test_config_common_properties( self ):
        return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0,reason='TF does not support backprop for grouped convolutions on CPU.',)
@slow
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_model_outputs_equivalence( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head( self ):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
| 367 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest ( nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester ( unittest.TestCase ):
"""simple docstring"""
    def test_offload_state_dict( self ):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , 'index.json' )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F'{key}.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
# TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index['weight'] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1': 0, 'a.2': 2} )
        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1.a': 0, 'a.2.a': 2} )
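# A minimal, self-contained sketch of the save/load round trip exercised by
# the tests above, using plain numpy memmaps (the on-disk layout here is an
# assumption for illustration, not necessarily accelerate's exact format):
import numpy as np
with TemporaryDirectory() as _tmp_dir:
    _weight = np.random.randn(2 , 3 ).astype(np.float32 )
    _path = os.path.join(_tmp_dir , 'weight.dat' )
    # write the raw buffer to disk, then map it back read-only
    np.memmap(_path , dtype=_weight.dtype , mode='w+' , shape=_weight.shape )[:] = _weight
    _reloaded = np.memmap(_path , dtype=_weight.dtype , mode='r' , shape=_weight.shape )
    assert np.array_equal(_weight , _reloaded )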
| 282 | 0 |
"""simple docstring"""
import operator as op
def solve(post_fix ):
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
| 243 |
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs(query: str , api_key: str = giphy_api_key ) -> list:
    formatted_query = '+'.join(query.split() )
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url ).json()['data']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    print('\n'.join(get_gifs('space ship')))
| 152 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class ImageProcessorUtilTester ( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
    def test_image_processor_from_pretrained_subfolder( self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
        self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester ( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="test-image-processor" , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="valid_org/test-image-processor-org" , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 371 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _snake_case ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PIL.Image.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , rescale_factor : Union[int, float] = 1 / 255 , do_rescale : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample=None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 64 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path ):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
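# Example invocation (script name and paths are placeholders, not from the
# original file):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin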
| 236 |
def bfs(graph, s, t, parent ):
    # Return True if there is a node that has not been visited yet.
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink ):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
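# For the sample capacity matrix above (the classic CLRS flow network), the
# expected maximum flow from node 0 to node 5 is 23. Note that
# ford_fulkerson mutates `graph` in place while building the residual
# network, so rerunning it requires a fresh copy of the original matrix.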
| 236 | 1 |
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
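# Illustrative check (not part of the original solution): the 8th expansion
# of the continued fraction for sqrt(2) is 1393/985, the first whose
# numerator has more digits than its denominator, so exactly one hit occurs
# among the first eight expansions.
assert solution(8 ) == 1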
if __name__ == "__main__":
print(F'{solution() = }')
| 129 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )
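def _bit_roundtrip_sanity_check():
    # Illustrative sketch (not part of the original pipeline): encoding an
    # image to +/-1-valued bits and decoding back recovers it up to 8-bit
    # quantization. Shapes: (B, C, H, W) -> (B, C * BITS, H, W) -> (B, C, H, W).
    img = torch.rand(1 , 3 , 4 , 4 )
    bits = decimal_to_bits(img )  # values in {-1.0, 1.0}
    assert torch.allclose(bits_to_decimal(bits ) , (img * 255).int().float() / 255 )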
def ddim_bit_scheduler_step(self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step(self , model_output , timestep , sample , prediction_type="epsilon" , generator=None , return_dict = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase__: Optional[int] = 256 , UpperCamelCase__: Optional[int] = 256 , UpperCamelCase__: Optional[int] = 50 , UpperCamelCase__: Optional[torch.Generator] = None , UpperCamelCase__: Optional[int] = 1 , UpperCamelCase__: Optional[str] = "pil" , UpperCamelCase__: bool = True , **UpperCamelCase__: int , ):
lowerCamelCase__ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCamelCase__ , )
lowerCamelCase__ : Union[str, Any] = decimal_to_bits(UpperCamelCase__ ) * self.bit_scale
lowerCamelCase__ : Union[str, Any] = latents.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCamelCase__ : Tuple = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Any = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
lowerCamelCase__ : Dict = bits_to_decimal(UpperCamelCase__ )
if output_type == "pil":
lowerCamelCase__ : int = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 129 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester ( unittest.TestCase ):
'''simple docstring'''
    def test_kwargs_handler( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
    def test_grad_scaler_kwargs( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs( self ):
        cmd = ['''torchrun''' , f'''--nproc_per_node={torch.cuda.device_count()}''' , inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 45 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
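# Example (values approximate, for illustration): for
# vector = np.array([2.3, 0.6, -2.0, -3.8]) and alpha = 0.3, the result is
# roughly [2.3, 0.6, -0.2594, -0.2933]: positive inputs pass through
# unchanged, while negative ones saturate smoothly toward -alpha.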
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
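# Illustrative check: for the default 20x20 grid this is the central
# binomial coefficient C(40, 20) = 137846528820.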
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 364 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'
def shift_tokens_right(input_ids: jnp.array , pad_token_id: int , decoder_start_token_id: int ):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
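# Example of the shift (illustrative): with decoder_start_token_id=1 and
# pad_token_id=0, the input [[5, 6, 7, 8]] becomes [[1, 5, 6, 7]] -- the
# start token is prepended and the last position is dropped.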
class FlaxMTaModel( FlaxTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
| 242 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class MMBTConfig:
    """Stores the configuration of a multimodal (MMBT-style) model."""
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[Any] =["flax"]
def __init__( self : Union[str, Any] , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *a : Optional[Any] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : Dict , *a : Dict , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : str , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *a : Tuple , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : int =["flax"]
def __init__( self : Optional[Any] , *a : List[str] , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : List[str] , **a : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *a : List[str] , **a : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Dict =["flax"]
def __init__( self : Optional[int] , *a : Union[str, Any] , **a : str ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Any , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : Any ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =["flax"]
def __init__( self : str , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Dict , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : Any , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Union[str, Any] =["flax"]
def __init__( self : Optional[int] , *a : str , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : int , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : str , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Tuple =["flax"]
def __init__( self : Optional[int] , *a : int , **a : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *a : Optional[int] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : Optional[int] , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["flax"]
def __init__( self : Union[str, Any] , *a : Union[str, Any] , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : List[Any] , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[str] =["flax"]
def __init__( self : Tuple , *a : Dict , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : Any ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Union[str, Any] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : str =["flax"]
def __init__( self : int , *a : Any , **a : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : List[Any] , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *a : str , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[str] =["flax"]
def __init__( self : Tuple , *a : str , **a : Any ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *a : Tuple , **a : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : str , *a : Union[str, Any] , **a : str ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : Union[str, Any] , *a : Any , **a : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *a : Dict , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *a : str , **a : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
| 365 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Check that the returned offsets adapt to the `add_prefix_space` and `trim_offsets` arguments.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
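

# A minimal sketch (added for illustration, not part of the test suite) of how
# the toy byte-level BPE vocabulary defined in setUp() tokenizes "lower" and
# "\u0120newer": start from single characters and greedily apply merge rules,
# with "\u0120" standing in for a leading space. Real BPE ranks merges by
# priority; this simplified loop applies the first matching rule instead.
def _toy_bpe(word, merges):
    symbols = list(word)
    changed = True
    while changed:
        changed = False
        for left, right in merges:
            i = 0
            while i < len(symbols) - 1:
                if symbols[i] == left and symbols[i + 1] == right:
                    # Replace the adjacent pair with the merged symbol.
                    symbols[i : i + 2] = [left + right]
                    changed = True
                else:
                    i += 1
    return symbols


assert _toy_bpe("lower", [("e", "r")]) == ["l", "o", "w", "er"]
assert _toy_bpe("\u0120newer", [("e", "r")]) == ["\u0120", "n", "e", "w", "er"]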
| 237 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (0..1) until the iteration z -> z**2 + c
    diverges for c = x + y*i."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens once the squared absolute value a*a + b*b
        # exceeds 4 (i.e. |z| > 2)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Points inside the set (distance == 1) are black, everything else white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Points inside the set are black; points outside get a hue proportional
    to how quickly they diverged."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
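

# Two quick sanity checks (added for illustration): the origin lies inside the
# Mandelbrot set, so it never diverges and maps to the maximum distance 1.0,
# which both coloring functions render as black; a point far outside the set
# diverges on the first step and gets distance 0.
assert get_distance(0, 0, 50) == 1.0
assert get_color_coded_rgb(get_distance(0, 0, 50)) == (0, 0, 0)
assert get_distance(2, 2, 50) == 0.0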
if __name__ == "__main__":
import doctest
doctest.testmod()
    # colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 234 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and the columns of the grid are sorted in
    decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a row that
    is sorted in decreasing order; returns len(array) if there is none."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by binary-searching each row, shrinking the search
    bound as the columns are sorted as well."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative since the
    rest of the row must be negative too."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 234 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap's node: holds a value and a random heap priority."""
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__(self ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two trees: values <= value go left, the rest right."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in ``left`` is <= every value in ``right``."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert a value by splitting at it and merging the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes holding ``value`` by splitting them out and re-merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the treap's values in increasing order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a sequence of commands: ``+x`` inserts x, ``-x`` erases all x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))

        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))

        else:
            print("Unknown command")

    return root
def main() -> None:
    """After each command, program prints treap"""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")
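

# A minimal non-interactive usage sketch (added for illustration, using the
# helpers above): build a treap from a few values, read them back in sorted
# order, then erase one value.
def demo_treap() -> None:
    root = None
    for value in (5, 2, 8, 1):
        root = insert(root, value)
    inorder(root)  # prints: 1,2,5,8,
    print()
    root = erase(root, 2)
    inorder(root)  # prints: 1,5,8,
    print()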
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 24 |
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
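

# Worked example (added for illustration): {1, 6, 11, 5} splits into
# {1, 5, 6} and {11}, so the minimum difference of subset sums is 12 - 11 = 1.
assert find_min([1, 6, 11, 5]) == 1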
| 24 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which all streamers inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Simple text streamer that prints the token(s) to stdout as soon as
    entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receive tokens, decode them, and print words as soon as they are complete."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flush any remaining cache and signal the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print the new text to stdout; end with a newline when the stream ends."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Check whether ``cp`` is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a
    downstream application as an iterator (useful for interactive demos)."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue; also put the stop signal at stream end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
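

# A minimal usage sketch (added for illustration; it downloads a small model
# on first run). Tokens are printed word by word while generate() runs, which
# is the documented way to combine a streamer with generation.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
    streamer = TextStreamer(tokenizer, skip_prompt=True)
    model.generate(**inputs, streamer=streamer, max_new_tokens=20)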
| 84 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch. Note: the TF checkpoint key names
    (e.g. "BatchNorm/beta") follow the standard TF-Slim MobileNetV1 layout."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, kernel_size, stride=1, groups=1,
                 bias=False, use_normalization=True, use_activation=True) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                     stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros")

        if use_normalization:
            self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=config.layer_norm_eps,
                                                momentum=0.9997, affine=True, track_running_stats=True)
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(config, in_channels=config.num_channels,
                                              out_channels=out_channels, kernel_size=3, stride=2)

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=in_channels,
                                                   kernel_size=3, stride=strides[i], groups=in_channels))
            self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels,
                                                   kernel_size=1))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 105 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 360 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class __a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 157 | 0 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the credit card number is valid and return the verdict."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
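

# A small worked example (added for illustration): recover the Luhn check
# digit that completes a 15-digit prefix. The check digit is the unique value
# in 0..9 that makes the full number pass luhn_validation.
partial_number = "411111111111111"
check_digit = next(d for d in range(10) if luhn_validation(partial_number + str(d)))
assert partial_number + str(check_digit) == "4111111111111111"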
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 85 |
def perfect_cube(n: int) -> bool:
    """Return True when ``n`` is a perfect cube."""
    # Round the real cube root to the nearest integer to avoid floating-point
    # error (27 ** (1 / 3) evaluates to 3.0000000000000004, for example).
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 0 | 0 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None,
                 num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128,
                 dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu",
                 is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id,
                         is_encoder_decoder=is_encoder_decoder, **kwargs)
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
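

# A short sketch (added for illustration): instantiate a small config and
# inspect the activation fields derived in __init__ above. With "gated-gelu",
# the gate flag is set and the backwards-compatibility branch rewrites the
# dense activation to "gelu_new".
if __name__ == "__main__":
    config = T5Config(d_model=128, num_layers=2, num_heads=4, feed_forward_proj="gated-gelu")
    print(config.is_gated_act, config.dense_act_fn)  # True gelu_new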
| 369 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 241 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are in the format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return the number together with all of its left and right truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Quick pre-check: the first and last three digits must both be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` truncatable primes (excluding 2, 3, 5 and 7)."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Return the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
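

# Quick sanity check (added for illustration): 3797 is truncatable from both
# directions, so every truncation below must be prime.
assert list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3]
assert all(is_prime(i) for i in list_truncated_nums(3797))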
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
| 162 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end time and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = get_job_time(args.workflow_run_id)
__lowerCamelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 162 | 1 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight
def __repr__( self : List[str] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
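

# Illustrative usage (added; names from the helpers above): choose foods
# greedily by value under a cost budget of 100.
food_names = ["Burger", "Pizza", "Coca Cola", "Rice"]
food_values = [80, 100, 60, 70]
food_weights = [40, 60, 40, 70]
chosen, total_value = greedy(build_menu(food_names, food_values, food_weights), 100, Things.get_value)
assert total_value == 180.0  # Pizza (100) + Burger (80) fit within the budget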
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# Two chains are possible:
# one ends at 89; seeding chain member 58 first means all the other members
# are classified with the fewest iterations.
# The other ends at 1 and contains only the single element 1.
# So 58 and 1 are declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # number 1
CHAINS[57] = False  # number 58
def chain(number: int) -> bool:
    """Return True when the chain starting at ``number`` arrives at 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
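

# Illustrative trace (added): 44 -> 32 -> 13 -> 10 -> 1, so the chain that
# starts at 44 arrives at 1 rather than 89.
assert next_number(44) == 32
assert next_number(32) == 13
assert next_number(13) == 10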
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 61 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase__ ):
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
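# Usage sketch (hypothetical values): the ONNX config above only declares the
# dynamic axes of the exported inputs.
#   config = RobertaConfig(num_hidden_layers=2)
#   onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # OrderedDict([('input_ids', {...}), ('attention_mask', {...})])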
| 133 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy ( saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
        onnx_opsets = json.load(f )['opsets']

    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )

    with open(saved_model_path , 'rb' ) as f:
        saved_model.ParseFromString(f.read() )

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )

    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:" )
        print(*incompatible_ops , sep='\n' )
    else:
        print(F"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
    parser.add_argument(
        '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
    )
    parser.add_argument(
        '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
    )
    parser.add_argument(
        '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
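# Example invocation (sketch; the saved-model path is hypothetical):
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict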
| 353 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples ( tok , src_examples , tgt_examples , max_tokens=1024 ):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text ):
        return tok(text , return_tensors='pt' ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir ( tok , data_dir : Path , max_tokens , save_path ):
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"packed {split} split from {len(src_docs )} examples -> {len(packed_src )}." )
        Path(save_path / F"{split}.source" ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / F"{split}.target" ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        shutil.copyfile(src_path , save_path / F"{split}.source" )
        shutil.copyfile(tgt_path , save_path / F"{split}.target" )
def packer_cli ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=128 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
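# Example invocation (sketch; paths and checkpoint name are hypothetical):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed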
| 50 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''', type=str, default=None, help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''', type=int, default=5, help='''The maximum total input sequence length after tokenization.''', )
    parser.add_argument(
        '''--num_beams''', type=int, default=None, help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ), )
    parser.add_argument(
        '''--model_name_or_path''', type=str, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=True, )
    parser.add_argument(
        '''--config_name''', type=str, default=None, help='''Pretrained config name or path if not the same as model_name''', )
    parser.add_argument(
        '''--device''', type=str, default='''cpu''', help='''Device where the model will be run''', )
    parser.add_argument('''--output_file_path''', type=str, default=None, help='''Where to store the final ONNX file.''' )

    args = parser.parse_args()

    return args
def snake_case_ ( A_ : Optional[Any], A_ : int="cpu" ):
'''simple docstring'''
_lowerCamelCase : Any = model_dict[model_name].from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
_lowerCamelCase : Union[str, Any] = tokenizer_dict[model_name].from_pretrained(UpperCAmelCase_ )
if model_name in ["facebook/bart-base"]:
_lowerCamelCase : Any = 0
_lowerCamelCase : int = None
_lowerCamelCase : int = 0
return huggingface_model, tokenizer
def export_and_validate_model ( model, tokenizer, onnx_file_path, num_beams, max_length ):
    '''simple docstring'''
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors='''pt''' ).to(model.device )

        summary_ids = model.generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            }, example_outputs=summary_ids, )

        logger.info('''Model exported to {}'''.format(onnx_file_path ) )

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )

        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None, {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3 )

        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def main ( ):
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )

    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device )

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )

    model.to(device )

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length )
if __name__ == "__main__":
main()
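# Example invocation (sketch):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx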
| 72 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests( TestCasePlus ):
    @require_torch
    def test_offline_mode( self ):
        '''simple docstring'''
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' ,model=mname )

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_no_internet( self ):
        '''simple docstring'''
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' ,model=mname )

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        '''simple docstring'''
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_pipeline_exception( self ):
        '''simple docstring'''
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,1 ,result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
    @require_torch
    def test_offline_model_dynamic_model( self ):
        '''simple docstring'''
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
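# Usage note (sketch): outside the test suite the same switch is just an
# environment variable, e.g.
#   TRANSFORMERS_OFFLINE=1 python my_script.py
# which makes every from_pretrained call resolve from the local cache only.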
| 83 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file ( filename , start_prompt , end_prompt ):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task ( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task ( task_guide , overwrite=False ):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 96 | 0 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , FutureWarning , )
| 93 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
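# Usage sketch (comments only; instantiation needs network access or a cached
# checkpoint):
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("hello world", return_token_type_ids=True)
#   enc.input_ids starts with the [CLS] id and ends with the [SEP] id.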
| 368 |
"""simple docstring"""
def decimal_isolate (number , digit_amount ):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 195 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    def __init__( self : Union[str, Any] , *lowercase : Dict , **lowercase : Any ):
        """simple docstring"""
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*lowercase , **lowercase )
| 223 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_x_clip'''] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 223 | 1 |
def merge_sort( collection: list ) -> list:
    '''simple docstring'''

    def merge( left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right

        return list(_merge() )

    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
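# Quick sanity check: merging the sorted halves [1, 4] and [2, 3] pops
# 1, 2, 3 in order and then drains the remaining [4].
assert merge_sort([3, 1, 4, 2]) == [1, 2, 3, 4]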
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 304 |
import math
from datetime import datetime, timedelta
def gauss_easter( year ) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
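# Known reference point: Easter 2023 fell on April 9.
assert gauss_easter(2023) == datetime(2023, 4, 9)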
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 304 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=40 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        input_ids = tf.concat([input_ids, eos_tensor] ,axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        inputs_dict = prepare_pegasus_inputs_dict(config ,input_ids ,decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self ,config ,inputs_dict ):
        '''simple docstring'''
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids ,attention_mask=attention_mask ,head_mask=head_mask ,use_cache=True )

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] ,axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )

        output_from_no_past = model(next_input_ids ,attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens ,attention_mask=next_attention_mask ,past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice ,output_from_past_slice ,rtol=1e-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Optional[Any]:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=PegasusConfig )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_a : Tuple= "google/pegasus-xsum"
    @cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected( self ,**tokenizer_kwargs ):
        '''simple docstring'''
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text( self ,**tokenizer_kwargs ):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text ,**tokenizer_kwargs ,padding=True ,return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=True ,)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation_xsum( self ):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
| 20 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_A : int = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester( unittest.TestCase, ToolTesterMixin ):
    def setUp( self ):
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )

    def test_exact_match_arg( self ):
        result = self.tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_arg_remote( self ):
        result = self.remote_tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg( self ):
        result = self.tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg_remote( self ):
        result = self.remote_tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 202 | 0 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs ):
    '''simple docstring'''
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                """Sharding is ambiguous for this dataset: """
                + """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
                + """\n""".join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
                + """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards , max_num_jobs ):
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
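# Worked example: 10 shards over 3 jobs split into groups of 4, 3 and 3.
assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]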
def _split_gen_kwargs( gen_kwargs , max_num_jobs ):
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs( gen_kwargs_list ):
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng , gen_kwargs ):
    '''simple docstring'''
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 363 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer( PreTrainedTokenizer ):
    def __init__(self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ):
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self ):
        return len(self.sp_model )

    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text(self , inputs ):
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )

        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )

        return new_pieces
    def _convert_token_to_id(self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token(self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string(self , tokens ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
    def _a (self , *args , **kwargs ):
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text | 135 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_ ( pkg , hint=None ) -> None:
    '''simple docstring'''
    require_version(deps[pkg] , hint )
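# Editorial sketch (added; not part of the original module): entries in the deps
# table are full requirement strings, e.g. something like
# "tokenizers>=0.11.1,!=0.11.3,<0.14", and require_version compares them against
# the installed distribution. A minimal stand-in for the simple ">=" case, using
# `packaging` (itself listed in the runtime checks above):
from packaging import version as _version_sketch
def _meets_minimum(installed , minimum ):
    # True when the installed version is at least the required minimum
    return _version_sketch.parse(installed ) >= _version_sketch.parse(minimum )
assert _meets_minimum("4.30.0" , "4.0.0" )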
| 65 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any , A : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__snake_case: Dict = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = """sgugger/tiny-distilbert-classification"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
__snake_case: Any = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: int = TensorFlowBenchmark(A )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: int = AutoConfig.from_pretrained(A )
__snake_case: Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A , [config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
__snake_case: Tuple = AutoConfig.from_pretrained(A )
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = TensorFlowBenchmark(A , [config] )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """sshleifer/tiny-gpt2"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(A )
__snake_case: Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[str] = TensorFlowBenchmark(A , [config] )
__snake_case: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = """patrickvonplaten/t5-tiny-random"""
__snake_case: List[str] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Optional[int] = TensorFlowBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = """sshleifer/tiny-gpt2"""
__snake_case: List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(A , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(A , """env.csv""" ) , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A : Union[str, Any] ):
self.assertTrue(hasattr(A , """sequential""" ) )
self.assertTrue(hasattr(A , """cumulative""" ) )
self.assertTrue(hasattr(A , """current""" ) )
self.assertTrue(hasattr(A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , """log.txt""" ) , log_print=A , trace_memory_line_by_line=A , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(A , """log.txt""" ) ).exists() )
| 111 | 0 |
import random
from typing import Any
def fisher_yates_shuffle( data : list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b] , data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
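    # Editorial usage note (added): seeding the module-level RNG makes the
    # in-place shuffle reproducible, which is handy when testing.
    random.seed(0)
    print('Seeded FY Shuffle', fisher_yates_shuffle([1, 2, 3, 4]))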
| 39 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
# pass variant but use the non-variant filenames
A__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
A__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
# pass variant but use the non-variant filenames
A__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
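# Editorial sketch (added) of the rule the tests above exercise — simplified; the
# real diffusers implementation covers more cases. Every PyTorch ".bin" weight
# needs a ".safetensors" sibling; transformers-style components ship
# "pytorch_model.bin" next to "model.safetensors". The `variant` argument is kept
# only for signature parity: in these fixtures the ".fp16" infix is already part
# of the filename stems, so a plain extension swap suffices.
def _sketch_is_safetensors_compatible(filenames,variant=None ):
    safetensors_files = {f for f in filenames if f.endswith('''.safetensors''' )}
    for name in filenames:
        if not name.endswith('''.bin''' ):
            continue
        base = name[: -len('''.bin''' )]
        candidates = {base + '''.safetensors''', base.replace('''pytorch_model''','''model''' ) + '''.safetensors'''}
        if not candidates & safetensors_files:
            return False
    return True
assert _sketch_is_safetensors_compatible(
    ['''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''']
)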
| 39 | 1 |
from math import sqrt
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
__SCREAMING_SNAKE_CASE : str = True
# 0 and 1 are none primes.
if number <= 1:
__SCREAMING_SNAKE_CASE : Any = False
for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__SCREAMING_SNAKE_CASE : List[Any] = False
break
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool"
return status
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(2 , n + 1 ) )
__SCREAMING_SNAKE_CASE : Optional[int] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__SCREAMING_SNAKE_CASE : List[Any] = 0
# filters actual prime numbers.
__SCREAMING_SNAKE_CASE : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
__SCREAMING_SNAKE_CASE : str = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowercase__ ):
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : str = [] # this list will be returns of the function.
# potential prime number factors.
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2
__SCREAMING_SNAKE_CASE : Any = number
if number == 0 or number == 1:
ans.append(lowercase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowercase__ ):
while quotient != 1:
if is_prime(lowercase__ ) and (quotient % factor == 0):
ans.append(lowercase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
    assert isinstance(lowercase__ , int ) and (
        lowercase__ >= 0
    ), "'number' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE : Optional[Any] = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = max(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( lowercase__ ):
    assert isinstance(lowercase__ , int ) and (
        lowercase__ >= 0
    ), "'number' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : int = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE : List[Any] = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 == 0
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 != 0
def _UpperCamelCase ( lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ )
), "'number' must been an int, even and > 2"
__SCREAMING_SNAKE_CASE : Tuple = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__SCREAMING_SNAKE_CASE : Optional[Any] = get_prime_numbers(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = len(lowercase__ )
# run variable for while-loops.
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Tuple = None
# exit variable. for break up the loops
__SCREAMING_SNAKE_CASE : Any = True
while i < len_pn and loop:
__SCREAMING_SNAKE_CASE : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__SCREAMING_SNAKE_CASE : List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (len(lowercase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE : Any = 0
while numbera != 0:
__SCREAMING_SNAKE_CASE : List[Any] = numbera % numbera
__SCREAMING_SNAKE_CASE : Optional[int] = numbera
__SCREAMING_SNAKE_CASE : Optional[int] = rest
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__SCREAMING_SNAKE_CASE : Any = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = prime_factorization(lowercase__ )
elif numbera == 1 or numbera == 1:
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = max(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__SCREAMING_SNAKE_CASE : int = prime_fac_a.count(lowercase__ )
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(max(lowercase__ , lowercase__ ) ):
ans *= n
else:
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int"
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowercase__ ):
ans += 1
# precondition
assert isinstance(lowercase__ , lowercase__ ) and is_prime(
lowercase__ ), "'ans' must been a prime number and from type int"
return ans
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__SCREAMING_SNAKE_CASE : Optional[Any] = p_number_a + 1 # jump to the next number
__SCREAMING_SNAKE_CASE : Optional[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
while number < p_number_a:
ans.append(lowercase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and ans[0] != p_number_a
and ans[len(lowercase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must been int and >= 1"
__SCREAMING_SNAKE_CASE : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase__ )
# precondition
assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number > 1
), "'number' must been an int and >= 1"
__SCREAMING_SNAKE_CASE : str = get_divisors(lowercase__ )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (divisors[0] == 1)
and (divisors[len(lowercase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__SCREAMING_SNAKE_CASE : Optional[Any] = gcd(abs(lowercase__ ) , abs(lowercase__ ) )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0"
__SCREAMING_SNAKE_CASE : Any = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Optional[int] = 1 # this will be return
for _ in range(n - 1 ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ans
ans += fiba
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp
return ans
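# Editorial smoke test (added): guarded because the helpers above are defined
# under obfuscated names in this copy — it only runs if the public names the
# call sites expect (is_prime, gcd, ...) actually exist in this module.
if __name__ == "__main__":
    if "is_prime" in globals() and "gcd" in globals():
        assert is_prime(13 )
        assert not is_prime(21 )
        assert gcd(24 , 36 ) == 12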
| 9 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __snake_case :
def __init__( self : Tuple , A_ : Any , A_ : Tuple=1_3 , A_ : str=7 , A_ : Any=True , A_ : Union[str, Any]=True , A_ : int=False , A_ : int=True , A_ : List[Any]=9_9 , A_ : Dict=6_4 , A_ : int=5 , A_ : List[Any]=4 , A_ : Optional[Any]=6_4 , A_ : str="gelu" , A_ : Union[str, Any]=0.1 , A_ : List[Any]=0.1 , A_ : Any=5_1_2 , A_ : Union[str, Any]=1_6 , A_ : str=2 , A_ : Any=0.02 , A_ : str=3 , A_ : Optional[int]=4 , A_ : int=None , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : List[Any] = seq_length
lowerCAmelCase_ : int = is_training
lowerCAmelCase_ : Union[str, Any] = use_input_mask
lowerCAmelCase_ : Tuple = use_token_type_ids
lowerCAmelCase_ : List[Any] = use_labels
lowerCAmelCase_ : int = vocab_size
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : List[str] = num_attention_heads
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = type_vocab_size
lowerCAmelCase_ : Any = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : str = num_labels
lowerCAmelCase_ : List[str] = num_choices
lowerCAmelCase_ : Optional[Any] = scope
def UpperCAmelCase__ ( self : Dict):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''')
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase_ : int = None
if self.use_input_mask:
lowerCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase_ : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Dict , A_ : Dict , A_ : int , A_ : Tuple , A_ : List[str] , A_ : str , A_ : List[Any]):
lowerCAmelCase_ : int = MPNetModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Any = model(A_ , A_)
lowerCAmelCase_ : Union[str, Any] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self : List[str] , A_ : Union[str, Any] , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any):
lowerCAmelCase_ : Any = MPNetForQuestionAnswering(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : int = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple , A_ : List[str] , A_ : Optional[Any] , A_ : Dict , A_ : Union[str, Any] , A_ : Tuple):
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Any = MPNetForSequenceClassification(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Dict = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tuple , A_ : Dict , A_ : Tuple , A_ : Dict , A_ : List[str] , A_ : List[Any]):
lowerCAmelCase_ : int = self.num_choices
lowerCAmelCase_ : List[str] = MPNetForMultipleChoice(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ : int = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ : Optional[int] = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Any , A_ : int , A_ : Any , A_ : List[Any] , A_ : Any , A_ : Union[str, Any]):
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : Tuple = MPNetForTokenClassification(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[int] = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self : Union[str, Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids , input_mask , sequence_labels , token_labels , choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ,unittest.TestCase ):
_a = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_a = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = True
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : List[Any] = MPNetModelTester(self)
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=A_ , hidden_size=3_7)
def UpperCAmelCase__ ( self : Any):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*A_)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*A_)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*A_)
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Union[str, Any] = MPNetModel.from_pretrained('''microsoft/mpnet-base''')
lowerCAmelCase_ : Optional[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
lowerCAmelCase_ : Union[str, Any] = model(A_)[0]
lowerCAmelCase_ : Optional[int] = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , A_)
lowerCAmelCase_ : Tuple = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4))
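# Editorial illustration (added; not part of the original test file):
# torch.allclose with atol=1e-4 accepts elementwise differences up to ~1e-4,
# which keeps the hard-coded logits-slice comparison above robust to small
# numerical drift across hardware.
if is_torch_available():
    _expected = torch.tensor([0.1943] )
    _observed = torch.tensor([0.19435] )
    assert torch.allclose(_expected , _observed , atol=1e-4 )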
| 103 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Any = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : Optional[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = model
def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE = Adafactor(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = AdamW(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE = optimizer
__SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
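    # Editorial sketch (added): the grouping above is the common "no weight decay
    # on bias / LayerNorm" recipe. For a hypothetical module m and decay rate wd,
    # it reduces to:
    #   decay    = [p for n, p in m.named_parameters() if not any(nd in n for nd in ("bias", "LayerNorm.weight"))]
    #   no_decay = [p for n, p in m.named_parameters() if any(nd in n for nd in ("bias", "LayerNorm.weight"))]
    #   groups   = [{"params": decay, "weight_decay": wd}, {"params": no_decay, "weight_decay": 0.0}]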
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.validation_end(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
__SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
__SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
__SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
parser.add_argument(
"""--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=a__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a__ )
# add custom checkpoints
if checkpoint_callback is None:
__SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a__ )
if logging_callback is None:
__SCREAMING_SNAKE_CASE = LoggingCallback()
__SCREAMING_SNAKE_CASE = {}
    if args.fp16:
__SCREAMING_SNAKE_CASE = 16
if args.gpus > 1:
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = """ddp"""
__SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , )
if args.do_train:
trainer.fit(a__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
| 331 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDPMScheduler,)
    def UpperCAmelCase__ ( self : Union[str, Any] , **kwargs : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
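
# For illustration only: a standalone sketch of the `previous_timestep` behavior
# exercised above, assuming a plain descending list of custom timesteps
# (`previous_timestep_sketch` is hypothetical, not part of the scheduler API).
def previous_timestep_sketch(timesteps, timestep):
    index = timesteps.index(timestep)
    # the "previous" timestep is simply the next entry in the descending list,
    # or -1 once the final entry is reached
    return timesteps[index + 1] if index < len(timesteps) - 1 else -1


assert previous_timestep_sketch([100, 87, 50, 1, 0], 87) == 50
assert previous_timestep_sketch([100, 87, 50, 1, 0], 0) == -1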
| 331 | 1 |
"""simple docstring"""
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 108 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe1(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe2(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe3(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe4(prompt=prompt, **kwargs)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        common_kwargs = dict(
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, **common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, **common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, **common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, **common_kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
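
# For illustration only: a hypothetical usage sketch of the comparison pipeline
# above (running it downloads all four v1.x checkpoints, so it is left
# commented out; the prompt is an assumption):
#
# pipe = StableDiffusionComparisonPipeline.from_pretrained(pipe4_model_id)
# output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# images = output.images  # one image per checkpoint, v1.1 through v1.4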
| 183 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 12 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
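
# For illustration only: a quick sanity check of the helpers above; the sample
# sentence is hypothetical. A frequency order is a permutation of the alphabet,
# and the match score is bounded by 12 (6 common + 6 uncommon letters).
_sample = "The quick brown fox jumps over the lazy dog"
assert sorted(get_frequency_order(_sample)) == sorted(LETTERS)
assert 0 <= english_freq_match_score(_sample) <= 12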
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 12 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
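
# For illustration only: a common formulation of the label-smoothed NLL loss
# imported from utils above; this sketch is an assumption about its shape, not
# the exact utils implementation.
def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    # spread epsilon mass uniformly over the vocabulary
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss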
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())

    args = parser.parse_args()

    main(args)
| 326 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 326 | 1 |
class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so convert the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
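
# For illustration only: a small check of the DP above (equivalent to Kadane's
# maximum-subarray algorithm); the input string is hypothetical.
assert SubArray("1,2,-3,4,5").solve_sub_array() == 9  # best contiguous run is 4 + 5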
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
| 359 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
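
# For illustration only: default construction; the attribute values checked
# here are the defaults reconstructed above.
config = ConvBertConfig()
assert config.vocab_size == 30522 and config.conv_kernel_size == 9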
| 146 | 0 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
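
# For illustration only: a minimal 0/1 knapsack in the argument shape the tests
# above expect -- (capacity, weights, values, counter); this recursive sketch is
# an assumption about the imported module, not its actual code.
def knapsack_sketch(capacity, weights, values, counter):
    # base case: no items left or no remaining capacity
    if counter == 0 or capacity == 0:
        return 0
    # if the nth item is too heavy, it cannot be included
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # otherwise take the better of including or skipping item n
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )


assert knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220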
if __name__ == "__main__":
unittest.main()
| 17 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
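
# For illustration only: the same odd-even transposition idea without
# processes; each even phase compare-swaps even-indexed pairs, each odd phase
# the odd-indexed pairs, and n phases suffice for a list of length n.
def odd_even_transposition_serial(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_serial([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]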
if __name__ == "__main__":
main()
| 293 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic;
        return True if successful
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the parent of a given set (with path compression)
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
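
# For illustration only: a small usage sketch of the structure above; the
# initial set sizes are hypothetical.
ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)  # sets {0, 1} and {2}; the largest set now has size 2
assert ds.max_set == 2
ds.merge(1, 2)
assert ds.max_set == 3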
| 369 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
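
# For illustration only: a standalone version of the `align_predictions` logic
# above on toy data; the shapes, scores, and label map are hypothetical.
def align_predictions_sketch(predictions, label_ids, label_map, ignore_index=-100):
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    preds_list = [[] for _ in range(batch_size)]
    out_label_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index:  # -100 marks ignored sub-tokens
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list


_toy_logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.1, 0.9]]])
_toy_labels = np.array([[0, -100, 1]])
assert align_predictions_sketch(_toy_logits, _toy_labels, {0: "O", 1: "B-PER"}) == (
    [["O", "B-PER"]],
    [["O", "B-PER"]],
)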
| 269 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321 |
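# The __init__.py above defers heavy torch/TF imports until first use. A
# stripped-down sketch of that lazy-module pattern (illustrative only; the
# real transformers._LazyModule handles more cases):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
        self.__all__ = list(self._class_to_module)

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so the import runs only once
        return value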
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 321 | 1 |
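# Usage sketch for the generated bindings above: parse a SentencePiece model
# file and inspect its training spec and vocabulary. This assumes the module
# above is saved as sentencepiece_model_pb2.py and that a spiece.model file
# (shipped with any sentencepiece-based tokenizer) is on disk.
import sentencepiece_model_pb2 as sp_pb2

m = sp_pb2.ModelProto()
with open("spiece.model", "rb") as f:
    m.ParseFromString(f.read())
print(m.trainer_spec.model_type, m.trainer_spec.vocab_size)
print(m.pieces[0].piece, m.pieces[0].score)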
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # equals the square of the sum (Nicomachus's theorem)
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 350 |
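# Cross-check of the closed forms above against a brute-force loop; the n = 10
# value (2640) is the worked example from the Project Euler statement.
def solution_bruteforce(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares

assert solution_bruteforce(10) == 2640
assert solution_bruteforce(100) == solution(100)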
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer that post-processes raw predictions before computing metrics."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315 | 0 |
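# The evaluate()/predict() overrides above share one pattern: run the bare
# prediction loop with metric computation disabled, post-process the raw
# predictions, then prefix every metric key. A toy, framework-free rendering
# of that flow (all names here are illustrative):
def run_eval(raw_loop, post_process, compute_metrics, prefix="eval"):
    output = raw_loop()                    # raw model outputs only
    preds = post_process(output)           # e.g. logits -> answer strings
    metrics = compute_metrics(preds)
    for key in list(metrics.keys()):       # namespace keys as prefix_key
        if not key.startswith(f"{prefix}_"):
            metrics[f"{prefix}_{key}"] = metrics.pop(key)
    return metrics

print(run_eval(lambda: [0.2, 0.9],
               lambda out: [round(x) for x in out],
               lambda preds: {"accuracy": sum(preds) / len(preds)}))
# {'eval_accuracy': 0.5}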
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
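# Self-contained check of twin_prime(); a simple trial-division primality test
# stands in for maths.prime_check.is_prime here.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

assert twin_prime(5) == 7   # (5, 7) is a twin prime pair
assert twin_prime(8) == -1  # 8 is not prime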
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    # community "seed resize" pipeline: the same seed should give similar
    # images across output sizes (see the latent-centering step below)
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        text_embeddings=None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 101 | 0 |
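# A minimal numpy illustration of the latent-centering trick in the pipeline
# above: reference latents drawn on a fixed 64x64 grid are copied into the
# center of the target-size latents, so the same seed yields similar images at
# different resolutions. Toy shapes below, covering only the case where the
# target is at least as large as the reference.
import numpy as np

ref = np.arange(16.0).reshape(1, 1, 4, 4)   # stand-in for the reference latents
tgt = np.zeros((1, 1, 6, 6))                # stand-in for the larger target latents

dx = (tgt.shape[3] - ref.shape[3]) // 2     # horizontal centering offset
dy = (tgt.shape[2] - ref.shape[2]) // 2     # vertical centering offset
tgt[:, :, dy : dy + ref.shape[2], dx : dx + ref.shape[3]] = ref
print(tgt[0, 0])                            # the ref block sits centered in tgt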
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_A = None
_A = logging.get_logger(__name__)
_A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_A = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
_A = {
"""google/fnet-base""": 5_12,
"""google/fnet-large""": 5_12,
}
_A = """▁"""
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create token type ids: 0s for the first sequence, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 166 | 0 |
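# Hedged usage sketch for the tokenizer above: the two methods build the
# [CLS] A [SEP] and [CLS] A [SEP] B [SEP] layouts with matching token type
# ids. Loading "google/fnet-base" needs network access and sentencepiece.
from transformers import FNetTokenizerFast

tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tok("first segment", "second segment")
print(enc["input_ids"])        # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])   # 0s for segment A, then 1s for segment B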
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 172 | """Solve the N-Queens problem with backtracking and print every solution."""
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, printing and recording each full solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for a queen and . for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 172 | 1 |
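# Quick check of the backtracking solver above on smaller boards: the
# 4-queens puzzle has exactly 2 solutions and 8-queens has 92. Note that
# solve() prints each board as a side effect.
def count_solutions(queens: int) -> int:
    solution.clear()
    test_board = [[0 for _ in range(queens)] for _ in range(queens)]
    solve(test_board, 0)
    return len(solution)

# count_solutions(4) == 2, count_solutions(8) == 92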
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
'''simple docstring'''
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_rust_tokenizer()
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : Tuple = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Dict = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
_UpperCAmelCase : List[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
    def test_image_processor(self):
'''simple docstring'''
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Any = self.prepare_image_inputs()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : Tuple = processor(images=lowerCamelCase__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Dict = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = 'lower newer'
_UpperCAmelCase : Union[str, Any] = processor(text=lowerCamelCase__ )
_UpperCAmelCase : Dict = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = 'lower newer'
_UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
_UpperCAmelCase : Union[str, Any] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_tokenizer_decode(self):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : str = processor.batch_decode(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
    def test_model_input_names(self):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Any = CLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Any = 'lower newer'
_UpperCAmelCase : str = self.prepare_image_inputs()
_UpperCAmelCase : List[str] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 356 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322 | 0 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
| 162 |
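# Round-trip example for the cipher above; non-letters pass through unchanged
# and the key only advances on letters (classic Vigenere behaviour):
ciphertext = encrypt_message("LEMON", "Attack at dawn!")
assert ciphertext == "Lxfopv ef rnhr!"
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"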
'''simple docstring'''
def get_data(source_data: list) -> list[list[float]]:
    """Transpose rows of raw data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 means lower-is-better, weight 1 higher-is-better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of source_data and append the combined score in place."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 162 | 1 |
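# Worked example for procentual_proximity() above: three items scored on
# (price, rating). Price has weight 0 (lower is better), rating weight 1
# (higher is better); each row gets its combined score appended in place.
vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
print(procentual_proximity(vehicles, [0, 1]))
# -> combined scores 1.25, 1.0 and ~0.333 appended to the three rows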
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original WavLM model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowercase : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 358 |
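# Typical invocation of the conversion script above (paths are placeholders;
# the checkpoint comes from the unilm WavLM release):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted
#
# The dumped folder can then be reloaded with WavLMModel.from_pretrained(...).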
'''simple docstring'''
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph on vertices 0..vertices_number-1 (Erdos-Renyi style)."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v with the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) undirected graph."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160 | 0 |
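# Seeded example for the generator above, so the output is reproducible:
import random

random.seed(1)
print(random_graph(4, 0.5))
# undirected, so every edge shows up in both endpoints' adjacency lists
print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}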
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Wraps a SamImageProcessor and normalizes point/box prompts to the model's input size."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
"""simple docstring"""
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
# pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels( self : Optional[int] , input_points , input_labels ):
        """simple docstring"""
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self : Dict , target_size , coords : np.ndarray , original_size , is_bounding_box=False ):
        """simple docstring"""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
    def _check_and_preprocess_points( self : Optional[int] , input_points=None , input_labels=None , input_boxes=None , ):
        """simple docstring"""
        if input_points is not None:
            if hasattr(input_points , """numpy""" ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , """numpy""" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError("""Input labels must be a list of list of integers.""" )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , """numpy""" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names( self : int ):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self : int , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_masks(*args , **kwargs )
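# A minimal, self-contained sketch of the coordinate rescaling performed by
# _normalize_coordinates above: points in the original image frame are mapped
# into the resized frame whose longest edge is `longest_edge`. The helper name
# and the example call below are illustrative, not part of the transformers API.
def _normalize_points_sketch(points, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = points.astype(float)  # astype copies, so the input is left untouched
    out[..., 0] = out[..., 0] * (new_w / old_w)  # x coordinates
    out[..., 1] = out[..., 1] * (new_h / old_h)  # y coordinates
    return out
# e.g. _normalize_points_sketch(np.array([[500.0, 375.0]]), original_size=(1500, 2250))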
| 346 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
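# Migration note (the call below is illustrative): DeiTImageProcessor is the
# drop-in replacement, e.g.
#   DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# yields the same preprocessing behaviour as the deprecated feature extractor.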
| 346 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
class ExtractManager :
'''simple docstring'''
    def __init__( self : Optional[int] , cache_dir : Optional[str] = None):
        '''simple docstring'''
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path ( self : Optional[Any] , path : str):
        '''simple docstring'''
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path))
    def _do_extract ( self : Dict , output_path : str , force_extract : bool):
        '''simple docstring'''
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract ( self : Tuple , input_path : str , force_extract : bool = False):
        '''simple docstring'''
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path , force_extract):
            self.extractor.extract(input_path , output_path , extractor_format)
        return output_path
class BaseExtractor ( ABC ):
    '''simple docstring'''
    @classmethod
    @abstractmethod
    def is_extractable ( cls : List[str] , path : Union[Path, str] , **kwargs : Union[str, Any]):
        '''simple docstring'''
        ...
    @staticmethod
    @abstractmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        ...
class MagicNumberBaseExtractor ( BaseExtractor , ABC ):
    '''simple docstring'''
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number ( path : Union[Path, str] , magic_number_length : int):
        '''simple docstring'''
        with open(path , 'rb') as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable ( cls : Optional[int] , path : Union[Path, str] , magic_number : bytes = b""):
        '''simple docstring'''
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path , magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor ( BaseExtractor ):
    '''simple docstring'''
    @classmethod
    def is_extractable ( cls : Optional[int] , path : Union[Path, str] , **kwargs : Tuple):
        '''simple docstring'''
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers ( members , output_path ):
        '''simple docstring'''
        def resolved(path : str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path : str , base : str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path)).startswith(base)
        def badlink(info , base : str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name)))
            return badpath(info.linkname , base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name , base):
                logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""")
            elif finfo.issym() and badlink(finfo , base):
                logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""")
            elif finfo.islnk() and badlink(finfo , base):
                logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""")
            else:
                yield finfo
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        os.makedirs(output_path , exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path))
        tar_file.close()
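# Illustration of the traversal guard above (paths are examples):
# os.path.realpath(os.path.join("/out", "../etc/passwd")) resolves to
# "/etc/passwd", which does not start with "/out", so such a member is blocked.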
class GzipExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\x1F\x8B']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        with gzip.open(input_path , 'rb') as gzip_file:
            with open(output_path , 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file)
class ZipExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [
        B'PK\x03\x04',
        B'PK\x05\x06', # empty archive
        B'PK\x07\x08', # spanned archive
    ]
    @classmethod
    def is_extractable ( cls : str , path : Union[Path, str] , magic_number : bytes = b""):
        '''simple docstring'''
        if super().is_extractable(path , magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path , 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir) # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data) # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True # First central directory entry has correct magic number
            return False
        except Exception: # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        os.makedirs(output_path , exist_ok=True)
        with zipfile.ZipFile(input_path , 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        with lzma.open(input_path) as compressed_file:
            with open(output_path , 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file)
class RarExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile
        os.makedirs(output_path , exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , 'rb') as ifh, open(output_path , 'wb') as ofh:
            dctx.copy_stream(ifh , ofh)
class Bzip2Extractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\x42\x5A\x68']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        with bz2.open(input_path , 'rb') as compressed_file:
            with open(output_path , 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file)
class SevenZipExtractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr
        os.makedirs(output_path , exist_ok=True)
        with py7zr.SevenZipFile(input_path , 'r') as archive:
            archive.extractall(output_path)
class Lz4Extractor ( MagicNumberBaseExtractor ):
    '''simple docstring'''
    magic_numbers = [B'\x04\x22\x4D\x18']
    @staticmethod
    def extract ( input_path : Union[Path, str] , output_path : Union[Path, str]):
        '''simple docstring'''
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame
        with lz4.frame.open(input_path , 'rb') as compressed_file:
            with open(output_path , 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file)
class Extractor :
    '''simple docstring'''
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": Lz4Extractor, # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length ( cls : List[str]):
        '''simple docstring'''
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number ( path : Union[Path, str] , magic_number_length : int):
        '''simple docstring'''
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable ( cls : Optional[int] , path : Union[Path, str] , return_extractor : bool = False):
        '''simple docstring'''
        warnings.warn(
            'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
            'Use \'infer_extractor_format\' instead.' , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format ( cls : Dict , path : Union[Path, str]): # <Added version="2.4.0"/>
        '''simple docstring'''
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract ( cls : Optional[Any] , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ):
        '''simple docstring'''
        os.makedirs(os.path.dirname(output_path) , exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('.lock'))
        with FileLock(lock_path):
            shutil.rmtree(output_path , ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str): # passed as positional arg
                    warnings.warn(
                        'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
                        'Use \'extractor_format\' instead.' , category=FutureWarning , )
                    extractor = extractor if extractor != 'deprecated' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path)
            else:
                warnings.warn(
                    'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
                    'exception in 3.0.0.' , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path , output_path)
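# Hedged usage sketch of the dispatch above (the paths are illustrative, not
# from the source):
# fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")  # -> "gzip"
# if fmt:
#     Extractor.extract("/tmp/archive.tar.gz", "/tmp/extracted", extractor_format=fmt)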
| 119 | from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage ( donor_conc , acceptor_conc , intrinsic_conc , ):
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
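# Hedged usage sketch (the dopant concentrations below are example values for
# a silicon p-n junction at 300 K, not from the source):
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# returns roughly 0.81 (volts), i.e. (kT/q) * ln(Nd * Na / ni**2).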
| 119 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 254 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    """simple docstring"""
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
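# Hedged usage sketch of the locking pattern exercised above (path illustrative):
# lock = FileLock("/tmp/example.lock")
# with lock.acquire(timeout=1):
#     ...  # critical section; a second holder blocks and then raises Timeout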
| 254 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__lowercase : Optional[Any] = False
class __UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a : Tuple = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__a : Optional[int] = torch.manual_seed(0 )
__a : Tuple = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
        __a : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.float16 )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Tuple = generator.manual_seed(0 )
__a : Dict = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a : List[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : str = 'cyberpunk 2077'
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__a : Tuple = torch.manual_seed(0 )
__a : int = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__a : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a : List[Any] = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a : Union[str, Any] = 'A painting of a squirrel eating a burger '
__a : List[str] = torch.manual_seed(0 )
__a : List[str] = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
__a : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a : Tuple = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a : str = pipe.image_variation(__a , generator=__a , output_type='numpy' ).images
__a : Optional[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a : int = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 362 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel (model , keep_fp32_wrapper : bool = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
return model
def wait_for_everyone ():
    PartialState().wait_for_everyone()
def save (obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment (**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def get_pretty_name (obj ):
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts (source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use (port : int = None ):
    if port is None:
        port = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
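# Hedged usage sketch of patch_environment (the variable names are examples):
# with patch_environment(master_addr="127.0.0.1", master_port="29500"):
#     assert os.environ["MASTER_ADDR"] == "127.0.0.1"
# # both variables are removed from os.environ again on exit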
| 294 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
lowerCAmelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def _snake_case ( self , **lowercase ) -> Dict:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , **lowercase ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , **lowercase ) -> int:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
        lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowercase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowercase )
lowerCAmelCase = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowercase )
lowerCAmelCase = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 46 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 345 | 0 |
from __future__ import annotations
class Node :
    """simple docstring"""
    def __init__( self : Union[str, Any] , data : int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree ): # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree ):
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main(): # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
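# A hedged check of the structural property behind is_full_binary_tree (this
# helper is added for illustration): in a full binary tree the number of
# leaves exceeds the number of internal nodes by exactly one.
def count_leaves(tree):
    if not tree:
        return 0
    if not tree.left and not tree.right:
        return 1
    return count_leaves(tree.left) + count_leaves(tree.right)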
if __name__ == "__main__":
main()
| 369 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
lowerCAmelCase : Tuple = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = ConvBertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any="[UNK]" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : Dict="[CLS]" , lowerCAmelCase__ : Dict="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Dict , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowerCAmelCase__) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , normalizer_state.pop("type"))
SCREAMING_SNAKE_CASE_: Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_: List[str] = strip_accents
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_: Optional[int] = normalizer_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case
    def build_inputs_with_special_tokens( self : str , token_ids_a : Optional[int] , token_ids_b : Union[str, Any]=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
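# Illustrative layout produced by the two helpers above (standard BERT scheme):
#   single sequence:   [CLS] A [SEP]            token_type_ids: 0 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1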
| 127 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase_ : str = logging.get_logger(__name__)
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , *__A , **__A ) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' , FutureWarning , )
super().__init__(*__A , **__A ) | 81 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCamelCase_ : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute ( self , predictions , references , return_pvalue=False ) -> Optional[Any]:
        if return_pvalue:
            results = pearsonr(predictions , references )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(predictions , references )[0] )} | 81 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu ( snake_case : Tuple )-> int:
    '''simple docstring'''
    x = tf.convert_to_tensor(snake_case )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
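# Quick sanity check for the exact-erf GELU above (a sketch; assumes eager TF 2.x):
# GELU(0) = 0 and GELU(x) approaches x for large positive x.
# >>> _gelu(tf.constant([0.0, 10.0]))  # ~[0.0, 10.0]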
def _gelu_new ( snake_case : List[Any] )-> List[Any]:
    '''simple docstring'''
    x = tf.convert_to_tensor(snake_case )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.04_4715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish ( snake_case : Dict )-> Any:
    '''simple docstring'''
    x = tf.convert_to_tensor(snake_case )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast ( snake_case : str )-> List[Any]:
    '''simple docstring'''
    x = tf.convert_to_tensor(snake_case )
    coeff_a = tf.cast(0.04_4715 , x.dtype )
    coeff_b = tf.cast(0.79_7884_5608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x) ))
def quick_gelu ( snake_case : Dict )-> List[str]:
    '''simple docstring'''
    x = tf.convert_to_tensor(snake_case )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa ( snake_case : Any )-> str:
    '''simple docstring'''
    return tf.clip_by_value(_gelu(snake_case ) , -10 , 10 )
def glu ( snake_case : List[str] , axis : List[Any]=-1 )-> Union[str, Any]:
    '''simple docstring'''
    a , b = tf.split(snake_case , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
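# GLU splits the chosen axis in half: the first half is gated by a sigmoid of the
# second half. A minimal sketch (assumes eager TF 2.x): shape (2, 4) in, (2, 2) out.
# >>> glu(tf.ones((2, 4))).shape
# TensorShape([2, 2])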
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def approximate_gelu_wrap ( snake_case : Any )-> int:
        '''simple docstring'''
        return tf.keras.activations.gelu(snake_case , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
"""gelu""": gelu,
"""gelu_10""": gelu_aa,
"""gelu_fast""": gelu_fast,
"""gelu_new""": gelu_new,
"""glu""": glu,
"""mish""": mish,
"""quick_gelu""": quick_gelu,
"""relu""": tf.keras.activations.relu,
"""sigmoid""": tf.keras.activations.sigmoid,
"""silu""": tf.keras.activations.swish,
"""swish""": tf.keras.activations.swish,
"""tanh""": tf.keras.activations.tanh,
}
def get_tf_activation ( activation_string : List[str] )-> Dict:
    '''simple docstring'''
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
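# Usage sketch (hedged; assumes eager TF 2.x and the restored helper name above):
# >>> act = get_tf_activation("gelu_fast")
# >>> act(tf.constant([-1.0, 0.0, 1.0]))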
| 298 |
"""simple docstring"""
def kth_permutation ( k : Optional[Any] , n : Any )-> Any:
    '''simple docstring'''
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
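# Worked example (a sketch; uses the restored name above). The lexicographic
# permutations of range(3) are 012, 021, 102, ... so index k=2 is [1, 0, 2]:
# >>> kth_permutation(2, 3)
# [1, 0, 2]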
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class FlaxAutoModelForCausalLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class FlaxAutoModelForMaskedLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class FlaxAutoModelForSeqaSeqLM ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class FlaxAutoModelForSequenceClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class FlaxAutoModelForQuestionAnswering ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class FlaxAutoModelForTokenClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class FlaxAutoModelForMultipleChoice ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class FlaxAutoModelForNextSentencePrediction ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class FlaxAutoModelForImageClassification ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class FlaxAutoModelForVisionaSeq ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class FlaxAutoModelForSpeechSeqaSeq ( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
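# Usage sketch (hedged; standard auto-class API, checkpoint name is illustrative):
# >>> model = FlaxAutoModel.from_pretrained("bert-base-uncased")
# >>> type(model).__name__
# 'FlaxBertModel'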
| 343 | from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
    def __init__( self : Optional[int] , parent : Optional[Any] , batch_size : str=3 , image_size : Tuple=32 , num_channels : List[str]=3 , embeddings_size : Optional[int]=10 , hidden_sizes : List[str]=[10, 20, 30, 40] , depths : Tuple=[1, 1, 2, 1] , is_training : Dict=True , use_labels : str=True , hidden_act : Tuple="relu" , num_labels : List[str]=3 , scope : Dict=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs ( self : Dict ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self : Optional[int] ):
        """simple docstring"""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model ( self : Any , config : List[Any] , pixel_values : Optional[int] , labels : Tuple ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification ( self : List[Any] , config : List[str] , pixel_values : str , labels : Optional[int] ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common ( self : int ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp ( self : Any ):
        """simple docstring"""
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config ( self : List[Any] ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties ( self : Union[str, Any] ):
        """simple docstring"""
        return
    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds ( self : Dict ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes ( self : Optional[Any] ):
        """simple docstring"""
        pass
    def test_forward_signature ( self : Optional[int] ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model ( self : List[str] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output ( self : Union[str, Any] ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict : Tuple , config : int , model_class : str ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification ( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self : Any ):
        """simple docstring"""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ) -> Any:
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor ( self : Dict ):
        """simple docstring"""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head ( self : Dict ):
        """simple docstring"""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
| 343 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class a_ ( BeitImageProcessor ):
    '''simple docstring'''
    def __init__( self : Optional[Any] , *lowercase__ : Optional[int] , **lowercase_a : Dict):
        '''simple docstring'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*lowercase__ , **lowercase_a)
| 119 | import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc ( model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
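# Worked example (a sketch): duplicate `local` keys collapse to one entry and the
# result is sorted by title.
# >>> clean_model_doc_toc([{"local": "bert", "title": "BERT"},
# ...                      {"local": "bert", "title": "BERT"},
# ...                      {"local": "albert", "title": "ALBERT"}])
# [{'local': 'albert', 'title': 'ALBERT'}, {'local': 'bert', 'title': 'BERT'}]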
def check_model_doc ( overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 119 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( idx : Optional[int] ):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention ( idx : int , cnt : Optional[int] ):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token ( idx : Any ):
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final ( ):
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint ( cvt_model : int , image_size : Optional[int] , cvt_file_name : List[str] , pytorch_dump_folder_path : Union[str, Any] ):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 175 | from torch import nn
class _lowercase ( nn.Module ):
    def __init__( self : Any , class_size : Dict , embed_size : Union[str, Any] ) -> Dict:
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward ( self : List[str] , hidden_state : Any ) -> str:
        """simple docstring"""
        logits = self.mlp(hidden_state )
        return logits
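# Usage sketch (hedged; shapes are illustrative and assume `import torch` alongside
# the `from torch import nn` import above):
# >>> head = _lowercase(class_size=2, embed_size=8)
# >>> head(torch.randn(4, 8)).shape
# torch.Size([4, 2])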
| 175 | 1 |
import numpy as np
def sigmoid ( _UpperCAmelCase ):
    return 1 / (1 + np.exp(-_UpperCAmelCase ))
def sigmoid_linear_unit ( _UpperCAmelCase ):
    return _UpperCAmelCase * sigmoid(_UpperCAmelCase )
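# Worked example (a sketch; uses the restored names above):
# >>> sigmoid(np.array([0.0]))
# array([0.5])
# >>> sigmoid_linear_unit(np.array([0.0]))  # SiLU(0) = 0 * sigmoid(0) = 0
# array([0.])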
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size ( self : Optional[int]):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim ( self : List[str]):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a ( self : List[Any]):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim ( self : int):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim ( self : Optional[Any]):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet ( self : Any):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs ( self : Tuple):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq ( self : Optional[int]):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components ( self : Optional[int]):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_00_85,
            '''beta_end''': 0.0_12,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs ( self : Union[str, Any] , device : Optional[Any] , seed : Union[str, Any]=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_img2img ( self : Dict):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    def tearDown ( self : Dict):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img ( self : str):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image)
| 131 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend ( A_ : Union[str, Any] ):
'''simple docstring'''
if _re_test_backend.search(A_ ) is None:
return None
    backends = [b[0] for b in _re_backend.findall(A_ )]
    backends.sort()
    return "_and_".join(backends )
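# Example (a sketch): a guarded "if not is_xxx_available():" line maps to its backend
# name; anything else maps to None.
# >>> find_backend("if not is_torch_available():")
# 'torch'
# >>> find_backend("import os") is None
# True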
def parse_init ( A_ : Union[str, Any] ):
'''simple docstring'''
with open(A_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''', content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results ( import_dict_objects : Tuple, type_hint_objects : List[str] ):
'''simple docstring'''
def find_duplicates(A_ : List[str] ):
return [k for k, v in collections.Counter(A_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
    errors = []
for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def check_all_inits ( ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules ( ):
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''', '''''' ).replace(os.path.sep, '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
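# Quick illustration of the contract checked by analyze_results (hypothetical
# values, not part of the original script):
#
#   import_dict = {"none": ["AutoModel"], "torch": ["BertModel"]}
#   type_hints = {"none": ["AutoModel"], "torch": ["BertModel", "GPT2Model"]}
#   analyze_results(import_dict, type_hints)
#   # -> ["Differences for torch backend:",
#   #     "  GPT2Model in TYPE_HINT but not in _import_structure."]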
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
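# Worked example (derived from the function above, shown for illustration):
#
#   simplify([[1, 2, 3], [4, 5, 6]])
#   # -> [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]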
def solve_simultaneous(equations: list[list]) -> list:
    """Solve the given system and return the variable values, rounded to 5 places."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
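# Worked example: x + 2y = 3 and 4x + 5y = 6 give x = -1, y = 2, so
#
#   solve_simultaneous([[1, 2, 3], [4, 5, 6]])
#   # -> [-1.0, 2.0]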
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 72 | 1 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( a__ ):
snake_case__ = (DDIMParallelScheduler,)
snake_case__ = (("eta", 0.0), ("num_inference_steps", 50))
def lowerCamelCase__ ( self : int , **UpperCAmelCase : Dict ):
__lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**UpperCAmelCase )
return config
def lowerCamelCase__ ( self : int , **UpperCAmelCase : List[Any] ):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : Optional[int] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCamelCase : str = scheduler_class(**UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : int = 10, 0.0
__lowerCamelCase : Optional[int] = self.dummy_model()
__lowerCamelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for t in scheduler.timesteps:
__lowerCamelCase : Tuple = model(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Dict = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
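    # full_loop above is the canonical evaluation pattern: predict the noise
    # residual, then let the scheduler take one deterministic DDIM step
    # (eta=0.0). Outside the test harness the same loop looks roughly like
    # this (a sketch; `unet` and `latents` are assumed names):
    #
    #   scheduler.set_timesteps(50)
    #   for t in scheduler.timesteps:
    #       noise_pred = unet(latents, t).sample
    #       latents = scheduler.step(noise_pred, t, latents, eta=0.0).prev_sample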
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3 | 64 | """Utilities for fetching report artifacts from the last completed daily CI workflow run."""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI, restricted to the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # keyword spelling mirrors the imported helper's signature
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts and return their decoded file contents, keyed by artifact and file name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results | 64 | 1 |
'''Lazy import structure for the WavLM model.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 |
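# The dict above maps submodule name -> public symbols; `_LazyModule` defers the
# actual import until an attribute is first accessed, keeping package import
# fast. Rough shape of the mechanism (a sketch, not the real class;
# `_name_for` is an assumed helper):
#
#   class _LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._name_for(name), self.__name__)
#           return getattr(module, name)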
'''Lazy-propagation segment tree supporting range assignment and range-max queries.'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] (1-indexed), lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int:
        """Return the maximum of the elements in range [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
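# The class implements "range assign / range max" with lazy propagation:
# `update` overwrites an interval in O(log n), deferring child updates via
# `lazy`/`flag` until a later visit. A minimal self-check (illustrative only,
# not part of the original file):
#
#   st = SegmentTree(4)
#   st.build(1, 1, 4, [3, 1, 4, 1])
#   assert st.query(1, 1, 4, 1, 4) == 4
#   st.update(1, 1, 4, 2, 3, 9)    # array is now conceptually [3, 9, 9, 1]
#   assert st.query(1, 1, 4, 1, 4) == 9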
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt) | 297 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = """bert-base-cased"""
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Tuple = TFAutoModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = """bert-base-cased"""
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : List[str] = TFAutoModelForPreTraining.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(snake_case )
A__ , A__ : Dict = TFAutoModelForCausalLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case )
A__ , A__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case )
A__ , A__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ : List[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Tuple = TFAutoModelForSequenceClassification.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ : Optional[int] = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
@slow
@require_tensorflow_probability
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ : int = AutoConfig.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
A__ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case )
A__ , A__ : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertIsInstance(snake_case , snake_case )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : int = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(snake_case , snake_case )
A__ : Any = copy.deepcopy(model.config )
A__ : Dict = ["""FunnelBaseModel"""]
A__ : List[str] = TFAutoModel.from_config(snake_case )
self.assertIsInstance(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case )
A__ : Tuple = TFAutoModel.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Dict = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A__ : List[str] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ : str = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
A__ : List[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 296 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood size used to accumulate the structure tensor
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Detect Harris corners; returns the annotated image and the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # the validated constant from the constructor (originally hardcoded to 0.04 here)
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
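# Note: the raw response r scales with image intensity, so the fixed `r > 0.5`
# cut-off is crude. In practice one usually thresholds relative to the maximum
# response and applies non-maximum suppression. A hedged sketch (names are
# illustrative, not part of the original file):
#
#   responses = np.array([c[2] for c in corner_list])
#   strong = [c for c in corner_list if c[2] > 0.01 * responses.max()]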
| 296 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio` (Project Euler problem 58)."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
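# Hedged sanity check: for the default ratio 0.1 this is expected to return
# 26241, the published Project Euler 58 answer (stated here as an assumption
# for illustration; the computation is slow):
#
#   assert solution() == 26241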
if __name__ == "__main__":
import doctest
doctest.testmod() | 190 |
'''LLaMA model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {}
class LlamaConfig(PretrainedConfig):
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}') | 190 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("""model_type""")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
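# Minimal usage sketch (illustrative, not part of the original file):
#
#   config = UperNetConfig()  # defaults to a ResNet backbone
#   config = UperNetConfig(backbone_config={"model_type": "convnext"})  # dict form, resolved via CONFIG_MAPPING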
| 30 | """simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                """ padding..""" )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    """weight_decay""": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("""labels""")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["""max_length"""])

        labels = inputs.pop("""labels""")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["""max_length"""])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f""" padded to `max_length`={max_length}""" )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
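# The label-smoothing branch above imports `label_smoothed_nll_loss` from a
# local `utils` module that is not shown here. A sketch in the fairseq style,
# under the assumption that it follows the usual (loss, nll_loss) contract:
#
# def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
#     if target.dim() == lprobs.dim() - 1:
#         target = target.unsqueeze(-1)
#     nll_loss = -lprobs.gather(dim=-1, index=target)      # per-token NLL
#     smooth_loss = -lprobs.sum(dim=-1, keepdim=True)      # uniform-prior term
#     pad_mask = target.eq(ignore_index)
#     nll_loss.masked_fill_(pad_mask, 0.0)
#     smooth_loss.masked_fill_(pad_mask, 0.0)
#     eps_i = epsilon / lprobs.size(-1)
#     loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
#     return loss, nll_loss.sum()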
| 30 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowerCamelCase__ : Tuple = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
lowerCamelCase__ : Optional[int] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
lowerCamelCase__ : Dict = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    """GLUE evaluation metric."""

    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) | 225 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
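# Hedged sanity checks (the second value is the well-known Project Euler 7
# answer; stated here for illustration, not part of the original file):
#
#   assert solution(6) == 13
#   assert solution() == 104743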
if __name__ == "__main__":
print(f'''{solution() = }''') | 225 | 1 |
'''Doubly linked list implementation.'''
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f'''{self.data}'''

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''')

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
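# Quick usage sketch (illustrative, not in the original file):
#
#   ll = LinkedList()
#   for v in (1, 2, 3):
#       ll.insert(v)
#   assert str(ll) == "1 2 3"
#   assert 2 in ll
#   ll.delete_value(2)
#   assert list(ll) == [1, 3]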
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 18 | '''Tests for 4-bit model quantization via bitsandbytes.'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter; used only for testing."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
A_ = self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A_ = config.to_dict()
A_ = config.to_diff_dict()
A_ = config.to_json_string()
def __A ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
A_ = self.model_fpaa.get_memory_footprint()
A_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[int]:
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
A_ = BitsAndBytesConfig()
A_ = True
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Tuple:
with self.assertRaises(_SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Dict:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_fpaa.to(torch.floataa )
A_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A_ = self.model_fpaa.half()
# Check this does not throw an error
A_ = self.model_fpaa.float()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Optional[Any]:
A_ = '''t5-small'''
A_ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
A_ = AutoTokenizer.from_pretrained(cls.model_name )
A_ = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
from transformers import TaForConditionalGeneration
A_ = TaForConditionalGeneration._keep_in_fpaa_modules
A_ = None
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
A_ = modules
def __A ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> int:
super().setUp()
# model_name
A_ = '''bigscience/bloom-560m'''
A_ = '''t5-small'''
# Different types of model
A_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Sequence classification model
A_ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# CausalLM model
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Seq2seq model
A_ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> Union[str, Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
super().setUp()
def __A ( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
super().setUp()
    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
A_ = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> Optional[int]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'gpt2-xl'
__lowercase : List[Any] = 3.3191854854152187
| 18 | 1 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 346 |
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 0 |
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
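    # Hedged self-check (not part of the original CLI flow): the iterative sort
    # should agree with Python's built-in sorted() on any input list.
    sample = [4, 1, 5, 2, 2, 0]
    assert iter_merge_sort(sample) == sorted(sample)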
| 370 | import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about (and collect) deprecated arguments or attributes described by ``(name, version, message)`` tuples."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
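# Hedged usage sketch (the function and argument names below are illustrative,
# not from this module): a function that renamed a keyword argument can route
# the old name through `deprecate` and keep accepting it with a warning:
#
#     def resize(image, *, size=None, **kwargs):
#         old_size = deprecate("old_size", "0.30.0", "Use `size` instead.", take_from=kwargs)
#         size = size if old_size is None else old_size
#         ...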
| 20 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for the dataset split and log predictions/targets to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lowercase and strip punctuation/whitespace runs from the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
| 250 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.dummy_uncond_unet
_lowercase : Dict = KarrasVeScheduler()
_lowercase : Any = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Any = torch.manual_seed(0 )
_lowercase : List[Any] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" ).images
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : List[str] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" , return_dict=_UpperCamelCase )[0]
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = "google/ncsnpp-celebahq-256"
_lowercase : Any = UNetaDModel.from_pretrained(_UpperCamelCase )
_lowercase : List[Any] = KarrasVeScheduler()
_lowercase : int = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : Tuple = pipe(num_inference_steps=20 , generator=_UpperCamelCase , output_type="numpy" ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase : Tuple = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 250 | 1 |
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide ``number_of_bytes`` into ``partitions`` byte ranges (e.g. for a parallel download)."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
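    # Hedged example: splitting a 100-byte payload across 4 downloaders gives
    # ranges that cover every byte exactly once, with the remainder on the last.
    assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]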
| 120 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 120 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Tuple = BlipImageProcessor()
snake_case__ : str = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
snake_case__ : Any = BlipaProcessor(__lowercase ,__lowercase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :List[str] ,**__lowercase :int ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowercase ).tokenizer
def __lowerCamelCase ( self :Optional[Any] ,**__lowercase :Any ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowercase ).image_processor
def __lowerCamelCase ( self :List[Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[Any] = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta )]
snake_case__ : int = [Image.fromarray(np.moveaxis(__lowercase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Tuple = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
snake_case__ : Optional[Any] = self.get_image_processor(do_normalize=__lowercase ,padding_value=1.0 )
snake_case__ : List[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=__lowercase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__lowercase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowercase )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Union[str, Any] = BlipaProcessor(tokenizer=__lowercase ,image_processor=__lowercase )
snake_case__ : Tuple = self.prepare_image_inputs()
snake_case__ : Any = image_processor(__lowercase ,return_tensors='''np''' )
snake_case__ : Dict = processor(images=__lowercase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Optional[Any] = BlipaProcessor(tokenizer=__lowercase ,image_processor=__lowercase )
snake_case__ : str = '''lower newer'''
snake_case__ : str = processor(text=__lowercase )
snake_case__ : Tuple = tokenizer(__lowercase ,return_token_type_ids=__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : List[Any] = BlipaProcessor(tokenizer=__lowercase ,image_processor=__lowercase )
snake_case__ : Tuple = '''lower newer'''
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : int = processor(text=__lowercase ,images=__lowercase )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Tuple = BlipaProcessor(tokenizer=__lowercase ,image_processor=__lowercase )
snake_case__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : int = processor.batch_decode(__lowercase )
snake_case__ : List[str] = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Union[str, Any] = BlipaProcessor(tokenizer=__lowercase ,image_processor=__lowercase )
snake_case__ : int = '''lower newer'''
snake_case__ : Optional[Any] = self.prepare_image_inputs()
snake_case__ : str = processor(text=__lowercase ,images=__lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 230 |
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points."""
    # Account for the Earth's flattening by using the reduced (parametric) latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
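    # Hedged sanity check (coordinates approximate, not from the original file):
    # San Francisco to Yosemite should come out at a few hundred kilometres.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.2f} km")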
| 207 | 0 |
import os
from datetime import datetime as dt
from github import Github
lowerCAmelCase_ = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 116 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ = get_tests_dir('fixtures/vocab.json')
lowerCAmelCase_ = get_tests_dir('fixtures')
class _A ( unittest.TestCase ):
_UpperCamelCase : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] = 0
def __a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_A , _A )
def __a ( self : Any ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str = WavaVecaConfig()
lowercase : Tuple = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_A )
processor.save_pretrained(_A )
lowercase : Optional[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_A , os.path.join(_A , _A ) )
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] = WavaVecaFeatureExtractor()
lowercase : Optional[int] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Union[str, Any] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in tokenizer
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : Union[str, Any] = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : int = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : str ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int = WavaVecaFeatureExtractor()
lowercase : Dict = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Optional[int] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in feature extractor
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : int = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_A )
# copy relevant files
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write('''{}''' )
lowercase : Tuple = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_A ):
lowercase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
lowercase : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
lowercase : str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase : Union[str, Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A , use_fast=_A )
lowercase : Any = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoProcessor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Optional[Any] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[Any] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[int] = CustomTokenizer(_A )
lowercase : Union[str, Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_A )
lowercase : Dict = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : str ) -> Any:
"""simple docstring"""
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = False
class _A ( _lowerCamelCase ):
_UpperCamelCase : Union[str, Any] = False
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = '''AutoFeatureExtractor'''
_UpperCamelCase : Any = '''AutoTokenizer'''
_UpperCamelCase : Any = False
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# If remote code is not set, the default is to use local classes.
lowercase : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __a ( self : Any ) -> int:
"""simple docstring"""
lowercase : Any = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class _A ( unittest.TestCase ):
_UpperCamelCase : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __a ( cls : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : str = TOKEN
HfFolder.save_token(_A )
@classmethod
def __a ( cls : List[Any] ) -> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor''' ) , push_to_hub=_A , use_auth_token=self._token )
lowercase : Any = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : int = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor-org''' ) , push_to_hub=_A , use_auth_token=self._token , organization='''valid_org''' , )
lowercase : Dict = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : str ) -> Dict:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase : List[str] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Optional[int] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[Any] = CustomTokenizer(_A )
lowercase : Optional[Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase : List[str] = Repository(_A , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_A , '''tokenizer_config.json''' ) ) as f:
lowercase : Optional[Any] = json.load(_A )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowercase : Tuple = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=_A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' ) | 116 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( __snake_case : list[int] ):
return len(set(_lowerCamelCase ) ) == len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
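    # e.g. all elements distinct -> True; a repeated element -> False
    assert all_unique([1, 2, 3]) is True
    assert all_unique([1, 2, 2]) is False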
| 33 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 0 |
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and decode the artifacts' contents from the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
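# Hedged usage sketch; the artifact name and output directory below are
# illustrative, not taken from the actual CI configuration:
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["run_all_tests_gpu_test_reports"],
#         output_dir="ci_reports",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )
#     for zip_name, files in reports.items():
#         print(zip_name, sorted(files))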
| 364 | import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = ['input_values', 'attention_mask']
def __init__( self : Union[str, Any] , lowercase__ : int = 1 , lowercase__ : int = 16_000 , lowercase__ : float = 0.0 , lowercase__ : bool = False , lowercase__ : int = 80 , lowercase__ : int = 16 , lowercase__ : int = 64 , lowercase__ : str = "hann_window" , lowercase__ : float = 1.0 , lowercase__ : float = 80 , lowercase__ : float = 7_600 , lowercase__ : float = 1e-10 , lowercase__ : int = 2 , lowercase__ : bool = True , **lowercase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(feature_size=lowercase__ , sampling_rate=lowercase__ , padding_value=lowercase__ , **lowercase__)
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = return_attention_mask
lowerCAmelCase__ = num_mel_bins
lowerCAmelCase__ = hop_length
lowerCAmelCase__ = win_length
lowerCAmelCase__ = win_function
lowerCAmelCase__ = frame_signal_scale
lowerCAmelCase__ = fmin
lowerCAmelCase__ = fmax
lowerCAmelCase__ = mel_floor
lowerCAmelCase__ = reduction_factor
lowerCAmelCase__ = win_length * sampling_rate // 1_000
lowerCAmelCase__ = hop_length * sampling_rate // 1_000
lowerCAmelCase__ = optimal_fft_length(self.sample_size)
lowerCAmelCase__ = (self.n_fft // 2) + 1
lowerCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase__)
lowerCAmelCase__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __snake_case ( lowercase__ : List[np.ndarray] , lowercase__ : List[np.ndarray] , lowercase__ : float = 0.0):
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase__ = np.array(lowercase__ , np.intaa)
lowerCAmelCase__ = []
for vector, length in zip(lowercase__ , attention_mask.sum(-1)):
lowerCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
lowerCAmelCase__ = padding_value
normed_input_values.append(lowercase__)
else:
lowerCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def __snake_case ( self : List[Any] , lowercase__ : np.ndarray , ):
'''simple docstring'''
lowerCAmelCase__ = spectrogram(
lowercase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : str , lowercase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowercase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowercase__ : Union[bool, str, PaddingStrategy] = False , lowercase__ : Optional[int] = None , lowercase__ : bool = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[int] = None , **lowercase__ : Union[str, Any] , ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.')
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if audio is not None:
lowerCAmelCase__ = self._process_audio(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , )
else:
lowerCAmelCase__ = None
if audio_target is not None:
lowerCAmelCase__ = self._process_audio(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase__ = inputs_target['input_values']
lowerCAmelCase__ = inputs_target.get('attention_mask')
if decoder_attention_mask is not None:
lowerCAmelCase__ = decoder_attention_mask
return inputs
def __snake_case ( self : Tuple , lowercase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase__ : bool = False , lowercase__ : Union[bool, str, PaddingStrategy] = False , lowercase__ : Optional[int] = None , lowercase__ : bool = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Optional[Any] , ):
'''simple docstring'''
lowerCAmelCase__ = isinstance(lowercase__ , np.ndarray) and len(speech.shape) > 1
if is_batched_numpy and len(speech.shape) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
lowerCAmelCase__ = is_batched_numpy or (
isinstance(lowercase__ , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase__ = [np.asarray(lowercase__ , dtype=np.floataa) for speech in speech]
elif not is_batched and not isinstance(lowercase__ , np.ndarray):
lowerCAmelCase__ = np.asarray(lowercase__ , dtype=np.floataa)
elif isinstance(lowercase__ , np.ndarray) and speech.dtype is np.dtype(np.floataa):
lowerCAmelCase__ = speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase__ = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase__ = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase__ = [self._extract_mel_features(lowercase__) for waveform in speech]
lowerCAmelCase__ = BatchFeature({'input_values': features})
lowerCAmelCase__ = self.num_mel_bins
else:
lowerCAmelCase__ = BatchFeature({'input_values': speech})
lowerCAmelCase__ = self.pad(
lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
lowerCAmelCase__ = feature_size_hack
# convert input values to correct format
lowerCAmelCase__ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray):
lowerCAmelCase__ = [np.asarray(lowercase__ , dtype=np.floataa) for array in input_values]
elif (
not isinstance(lowercase__ , np.ndarray)
and isinstance(input_values[0] , np.ndarray)
and input_values[0].dtype is np.dtype(np.floataa)
):
lowerCAmelCase__ = [array.astype(np.floataa) for array in input_values]
elif isinstance(lowercase__ , np.ndarray) and input_values.dtype is np.dtype(np.floataa):
lowerCAmelCase__ = input_values.astype(np.floataa)
# convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
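# A minimal standalone sketch (not part of the class above) of the zero-mean,
# unit-variance normalization that `zero_mean_unit_var_norm` is assumed to
# perform; the unmasked per-vector form here is illustrative only and relies
# on the module-level numpy import used throughout this file.
def _zero_mean_unit_var_sketch(values):
    normalized = []
    for vector in values:
        vector = np.asarray(vector, dtype=np.float32)
        # subtract the mean and divide by the standard deviation (eps for stability)
        normalized.append((vector - vector.mean()) / np.sqrt(vector.var() + 1e-7))
    return normalized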
| 119 | 0 |
"""simple docstring"""
import math
import sys
def count_min_squares(number):
    """Return the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
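    # Quick sanity checks for the dynamic-programming routine above; the
    # function name follows the rewrite above, and the expected values come
    # from Lagrange's four-square theorem (e.g. 12 = 4 + 4 + 4).
    assert count_min_squares(1) == 1
    assert count_min_squares(12) == 3
    assert count_min_squares(13) == 2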
| 78 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
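# Hedged, dependency-light illustration of the sign convention asserted above:
# -reduce_mean(cross_entropy) is the mean per-token log-likelihood that
# EXPECTED_SCORE encodes. Toy logits/labels only, not actual mt5 output; it
# relies on the conditional `tensorflow` import above.
def _mean_log_likelihood_demo():
    logits = tf.constant([[[2.0, 0.5, -1.0]]])  # (batch, seq_len, vocab)
    labels = tf.constant([[0]])                 # (batch, seq_len)
    loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
    return -tf.math.reduce_mean(loss)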
| 198 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; `fit_generator` is deprecated in TF 2.x
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # the sigmoid head outputs a probability in [0, 1], so threshold at 0.5
    # rather than comparing against exact 0/1 values
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
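    # Hedged follow-up: with a sigmoid head, thresholding the probability at
    # 0.5 (as done above) is the standard decision rule; `prediction_label`
    # is an illustrative helper, not part of the original script.
    def prediction_label(probability: float, threshold: float = 0.5) -> str:
        return "Abnormality detected" if probability >= threshold else "Normal"

    assert prediction == prediction_label(float(result[0][0]))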
| 365 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the sum to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Collect the unique reduced fractions produced for n in {1, 2, -1, -2}."""
    unique_s: set = set()
    total: Fraction = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
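    # Small illustration of the reduction idiom used throughout solution():
    # dividing numerator and denominator by their gcd yields the canonical
    # form that Fraction would produce anyway.
    num, den = 4, 6
    hcf = gcd(num, den)
    assert (num // hcf, den // hcf) == (2, 3)
    assert Fraction(num, den) == Fraction(2, 3)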
| 114 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install \"transformers[serving]\".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have an empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
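# Client-side sketch: with `transformers-cli serve` running, the routes
# registered above answer plain HTTP. Host/port and payload keys mirror the
# defaults and Body(..., embed=True) parameters in this file; the `requests`
# package is an assumption of this sketch, not of the command itself.
def _tokenize_via_http(text: str, host: str = "localhost", port: int = 8888):
    import requests  # optional client-side dependency

    response = requests.post(
        f"http://{host}:{port}/tokenize",
        json={"text_input": text, "return_ids": True},
    )
    return response.json()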
| 182 | from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by remapping each pixel value."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
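        # Worked check of the contrast formula above: at level = 170 the
        # factor is 259 * (170 + 255) / (255 * (259 - 170)) ≈ 4.85, and
        # c = 128 is a fixed point, so mid-gray is preserved while other
        # tones are stretched toward 0 or 255.
        factor = (259 * (170 + 255)) / (255 * (259 - 170))
        assert round(factor, 2) == 4.85
        assert int(128 + factor * (128 - 128)) == 128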
| 87 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 366 |
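# Hedged usage sketch for the config above: `attribute_map` lets the generic
# Transformers names resolve to the GPT-style fields, using the defaults set
# in __init__.
def _trajectory_config_demo() -> None:
    config = TrajectoryTransformerConfig()
    assert config.hidden_size == config.n_embd  # mapped via attribute_map
    assert config.num_hidden_layers == config.n_layer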
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Dict = parent
__UpperCamelCase : List[str] = batch_size
__UpperCamelCase : str = seq_length
__UpperCamelCase : List[Any] = is_training
__UpperCamelCase : str = use_input_mask
__UpperCamelCase : int = use_token_type_ids
__UpperCamelCase : str = use_labels
__UpperCamelCase : List[str] = vocab_size
__UpperCamelCase : List[str] = hidden_size
__UpperCamelCase : List[Any] = num_hidden_layers
__UpperCamelCase : Union[str, Any] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : List[str] = hidden_dropout_prob
__UpperCamelCase : List[Any] = attention_probs_dropout_prob
__UpperCamelCase : List[str] = max_position_embeddings
__UpperCamelCase : Union[str, Any] = type_vocab_size
__UpperCamelCase : Optional[Any] = type_sequence_label_size
__UpperCamelCase : Union[str, Any] = initializer_range
__UpperCamelCase : Union[str, Any] = num_labels
__UpperCamelCase : Any = num_choices
__UpperCamelCase : Optional[Any] = scope
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[str] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : int = None
if self.use_labels:
__UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
__UpperCamelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = True
__UpperCamelCase : Tuple = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
__UpperCamelCase : Union[str, Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
__UpperCamelCase : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
__UpperCamelCase : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
__UpperCamelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
__UpperCamelCase : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
__UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase : Tuple = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Tuple = False
lowercase : List[Any] = False
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModelTester(self )
__UpperCamelCase : List[str] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Optional[int] = 3
__UpperCamelCase : int = input_dict["input_ids"]
__UpperCamelCase : Optional[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[str] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Any = "single_label_classification"
__UpperCamelCase : List[str] = input_dict["input_ids"]
__UpperCamelCase : Tuple = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : Tuple = "multi_label_classification"
__UpperCamelCase : Any = input_dict["input_ids"]
__UpperCamelCase : str = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Optional[Any] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Any = ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
__UpperCamelCase : int = original_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : List[Any] = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Dict = {"type": scaling_type, "factor": 10.0}
__UpperCamelCase : Optional[Any] = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
__UpperCamelCase : Optional[int] = scaled_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : Tuple = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCamelCase : Tuple = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCamelCase : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCamelCase : str = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Any = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCamelCase : Any = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCamelCase : Optional[Any] = model(torch.tensor(__UpperCamelCase ) )
__UpperCamelCase : Dict = torch.tensor(
        [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that "
__UpperCamelCase : Optional[Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCamelCase : Dict = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
__UpperCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
__UpperCamelCase : List[Any] = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 171 | 0 |
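# Hedged sketch of the rope_scaling dict exercised by the parameterized test
# above; the {"type", "factor"} keys follow the Hugging Face LlamaConfig
# convention ("linear" or "dynamic", with factor > 1.0).
def _rope_scaling_config_demo():
    return LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})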
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating x^(z-1) * e^(-x)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
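    # Sanity check (using the fixed routine above): Γ(n) = (n - 1)! for
    # positive integers, so gamma(5) should sit within quad's tolerance of 24.
    assert abs(gamma(5) - 24.0) < 1e-6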
| 180 | import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
def UpperCAmelCase ( self ) -> Optional[int]:
super().setUp()
_A = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=20 , lowerCAmelCase_=5 ) -> Tuple[str, list]:
_A = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase_ )) for i in range(len(lowerCAmelCase_ ) )]
_A = list(filter(lambda lowerCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCAmelCase_ ) , lowerCAmelCase_ ) )
if max_length is not None and len(lowerCAmelCase_ ) > max_length:
_A = toks[:max_length]
if min_length is not None and len(lowerCAmelCase_ ) < min_length and len(lowerCAmelCase_ ) > 0:
while len(lowerCAmelCase_ ) < min_length:
_A = toks + toks
# toks_str = [t[1] for t in toks]
_A = [t[0] for t in toks]
# Ensure consistency
_A = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
if " " not in output_txt and len(lowerCAmelCase_ ) > 1:
_A = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase_ )
)
if with_prefix_space:
_A = """ """ + output_txt
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
return output_txt, output_ids
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_A = tokenizer("""m xxx ɪ""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_A = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
_A = tokenizer("""maɪ c""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase ( self ) -> int:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> str:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase_ )
_A = """Hello how are you"""
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""en-us""" ).input_ids
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowerCAmelCase_ , """ɛ l o h aʊ a ʁ j u""" )
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how Are you"""
_A = """hello how are you"""
_A = tokenizer(lowerCAmelCase_ ).input_ids
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_A = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_A = tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_A = tokenizer.batch_decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ )
_A = [tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_A = tokenizer.add_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size + len(lowerCAmelCase_ ) )
_A = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_A = tokenizer.add_special_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size_a + len(lowerCAmelCase_ ) )
_A = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> str:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(output["""text"""] , lowerCAmelCase_ )
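# Hedged sketch of what char_offsets are typically consumed for: mapping
# model-frame indices to timestamps. The 20 ms stride is an assumption that
# matches common Wav2Vec2 checkpoints, not something this test file asserts.
def _offsets_to_seconds(char_offsets, time_per_frame: float = 0.02):
    return [
        {
            "char": offset["char"],
            "start": offset["start_offset"] * time_per_frame,
            "end": offset["end_offset"] * time_per_frame,
        }
        for offset in char_offsets
    ]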
| 180 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
@torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected | 96 |
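# Framework-light sketch of the flag-and-blank pattern implemented above: any
# score over the threshold blanks the corresponding image. Illustrative only,
# using the numpy import already at the top of this file.
def _blank_flagged(images, scores, threshold=0.5):
    flags = [float(score) > threshold for score in scores]
    blanked = [np.zeros(image.shape) if flag else image for image, flag in zip(images, flags)]
    return blanked, flags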
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a__( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Optional[Any] , __snake_case : float , __snake_case : Callable , __snake_case : int , __snake_case : float = 1.0 , __snake_case : str = None , ):
super().__init__()
a : Dict = initial_learning_rate
a : Optional[int] = warmup_steps
a : Tuple = power
a : str = decay_schedule_fn
a : Optional[int] = name
def __call__( self : List[Any] , __snake_case : int ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
            a : Optional[int] = tf.cast(__snake_case , tf.float32 )
            a : Tuple = tf.cast(self.warmup_steps , tf.float32 )
a : str = global_step_float / warmup_steps_float
a : Union[str, Any] = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(__snake_case - self.warmup_steps ) , name=__snake_case , )
def lowercase_ ( self : Dict ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
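# A short worked note (illustrative, not part of the original file): with
# initial_learning_rate=1e-3 and warmup_steps=100, WarmUp returns (step / 100) * 1e-3
# during warmup when power=1.0 (e.g. 5e-4 at step 50), and defers to decay_schedule_fn
# from step 100 onward.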
def lowerCamelCase__ ( _A , _A , _A , _A = 0.0 , _A = 0.9 , _A = 0.999 , _A = 1E-8 , _A = None , _A = None , _A = 0.0 , _A = 1.0 , _A = None , ):
a : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_A , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_A , )
if num_warmup_steps:
a : Tuple = WarmUp(
initial_learning_rate=_A , decay_schedule_fn=_A , warmup_steps=_A , )
if weight_decay_rate > 0.0:
a : List[Any] = AdamWeightDecay(
            learning_rate=_A , weight_decay_rate=_A , beta_1=_A , beta_2=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=_A , )
else:
a : Tuple = tf.keras.optimizers.Adam(
            learning_rate=_A , beta_1=_A , beta_2=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
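# A minimal usage sketch (an illustration, not part of the original file), assuming this
# helper mirrors transformers' create_optimizer with the positional order
# (init_lr, num_train_steps, num_warmup_steps); `model` below is hypothetical:
# optimizer, lr_schedule = lowerCamelCase__(2e-5, 1_000, 100)
# model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")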
class AdamWeightDecay( Adam ):
def __init__( self : Union[str, Any] , __snake_case : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , __snake_case : float = 0.9 , __snake_case : float = 0.999 , __snake_case : float = 1e-7 , __snake_case : bool = False , __snake_case : float = 0.0 , __snake_case : Optional[List[str]] = None , __snake_case : Optional[List[str]] = None , __snake_case : str = "AdamWeightDecay" , **__snake_case : Optional[int] , ):
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
a : Tuple = weight_decay_rate
a : Optional[Any] = include_in_weight_decay
a : Any = exclude_from_weight_decay
@classmethod
def lowercase_ ( cls : Any , __snake_case : Optional[Any] ):
a : Any = {'WarmUp': WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def lowercase_ ( self : str , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Optional[int] ):
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
a : List[str] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : str ):
a : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowercase_ ( self : List[str] , __snake_case : List[str] , __snake_case : Any=None , **__snake_case : Tuple ):
a , a : Dict = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def lowercase_ ( self : List[Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
a : List[Any] = apply_state or {}
a : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
a : int = self._fallback_apply_state(__snake_case , __snake_case )
a : Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowercase_ ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple=None ):
a , a : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : List[Any] = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : int=None ):
a , a : Tuple = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : int = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : Union[str, Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowercase_ ( self : List[str] , __snake_case : Dict ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class GradientAccumulator:
def __init__( self : List[str] ):
        self._gradients = []
        self._accum_steps = None
@property
def lowercase_ ( self : Any ):
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowercase_ ( self : Optional[int] ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[Any] , __snake_case : Any ):
if not self._gradients:
a : Union[str, Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__snake_case )}""" )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def lowercase_ ( self : Tuple ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) ) | 96 | 1 |
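# A minimal usage sketch of the accumulator above (hypothetical, not part of the original
# file); the property and reset methods appear under mangled names in this file, so the
# calls below assume the original transformers GradientAccumulator API:
# accumulator = GradientAccumulator()
# for step, batch in enumerate(dataset):  # `dataset`, `model`, `optimizer`, `compute_loss` are hypothetical
#     with tf.GradientTape() as tape:
#         loss = compute_loss(model, batch)
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if (step + 1) % 8 == 0:  # apply once every 8 micro-batches
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()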
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
A_ : Dict = AutoConfig.from_pretrained(a_ )
A_ : Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=a_ )
A_ : Union[str, Any] = checkpoints.load_tax_checkpoint(a_ )
A_ : Union[str, Any] = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
A_ : Union[str, Any] = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A_ : Tuple = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A_ : Optional[Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"""
""" attribute with a value from [\'local\', \'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
A_ : List[str] = F"layers_{str(a_ )}"
# Self-Attention
A_ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
A_ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
A_ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
A_ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A_ : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
A_ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
A_ : str = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
A_ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
A_ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
A_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
A_ : str = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
A_ : Any = flax_model.params["""encoder"""]["""block"""][str(a_ )]["""layer"""]
A_ : str = tax_attention_key
A_ : Any = tax_attention_out
A_ : List[str] = tax_attention_query
A_ : str = tax_attention_value
A_ : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A_ : str = tax_global_layer_norm
if split_mlp_wi:
A_ : Any = tax_mlp_wi_a
A_ : List[Any] = tax_mlp_wi_a
else:
A_ : List[Any] = tax_mlp_wi
A_ : Tuple = tax_mlp_wo
A_ : Any = tax_mlp_layer_norm
A_ : List[Any] = flax_model_encoder_layer_block
# Only for layer 0:
A_ : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
A_ : Dict = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A_ : List[Any] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
A_ : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
A_ : str = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
A_ : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A_ : List[str] = F"layers_{str(a_ )}"
# Self-Attention
A_ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
A_ : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
A_ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
A_ : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
A_ : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
A_ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
A_ : Any = tax_enc_dec_attention_module["""key"""]["""kernel"""]
A_ : Union[str, Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
A_ : str = tax_enc_dec_attention_module["""query"""]["""kernel"""]
A_ : Union[str, Any] = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
A_ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
A_ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
A_ : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
A_ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
A_ : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
A_ : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
A_ : List[str] = flax_model.params["""decoder"""]["""block"""][str(a_ )]["""layer"""]
A_ : str = tax_attention_key
A_ : List[str] = tax_attention_out
A_ : Optional[Any] = tax_attention_query
A_ : Tuple = tax_attention_value
A_ : Dict = tax_pre_attention_layer_norm
A_ : List[str] = tax_enc_dec_attention_key
A_ : Optional[Any] = tax_enc_dec_attention_out
A_ : Union[str, Any] = tax_enc_dec_attention_query
A_ : Optional[Any] = tax_enc_dec_attention_value
A_ : Union[str, Any] = tax_cross_layer_norm
if split_mlp_wi:
A_ : str = tax_mlp_wi_a
A_ : List[Any] = tax_mlp_wi_a
else:
A_ : Union[str, Any] = tax_mlp_wi
A_ : Optional[int] = tax_mlp_wo
        A_ : Optional[Any] = tax_mlp_layer_norm
A_ : List[str] = flax_model_decoder_layer_block
# Decoder Normalization
A_ : List[str] = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    A_ : Optional[int] = tax_decoder_norm
# Only for layer 0:
A_ : Dict = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
A_ : int = tax_decoder_rel_embedding
# Token Embeddings
A_ : Union[str, Any] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    A_ : List[str] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A_ : Dict = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(a_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
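# Example invocation (hypothetical paths and script name, not part of the original file):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_name google/long-t5-local-base \
#     --flax_dump_folder_path ./flax_model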
| 344 | import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester ( ConfigTester ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) )
class LevitModelTester :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=16 , lowerCAmelCase__=[1_28, 2_56, 3_84] , lowerCAmelCase__=[4, 6, 8] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=[16, 16, 16] , lowerCAmelCase__=0 , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = initializer_range
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = LevitModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = LevitForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
__a : int = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a : int = False
__a : Dict = False
__a : Optional[Any] = False
__a : Optional[int] = False
__a : Dict = False
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = LevitModelTester(self )
        __lowercase = LevitConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__lowercase = (self.model_tester.image_size, self.model_tester.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
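    # Worked example of the conv output-size formula used above (an illustration, not part
    # of the original test): with image_size=64, kernel_size=3, stride=2, padding=1, the
    # spatial side shrinks as floor((64 + 2*1 - 3) / 2) + 1 = 32, then 16, 8 and 4 over the
    # four stages, so the first hidden state has sequence length 4 * 4 = 16.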
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
'''simple docstring'''
__lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
                model_class in get_values(MODEL_MAPPING )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase = False
__lowercase = True
for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowercase = model_class(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__lowercase = problem_type['''title''']
__lowercase = problem_type['''num_labels''']
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if problem_type["num_labels"] > 1:
__lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__lowercase = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list:
__lowercase = model(**lowerCAmelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LevitModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img ( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )
# verify the logits
__lowercase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) | 210 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
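# Quick sanity check (illustrative, not part of the original file): the standard normal
# density at x=0 is 1/sqrt(2*pi), so gaussian(0) should return about 0.3989422804014327.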
| 217 |
'''simple docstring'''
def match_pattern ( input_string : str , pattern : str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether the prefix of
    # length i of input_string matches with the prefix of length j of the
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # since a pattern of zero length will never match a string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using a bottom-up approach to fill in all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
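# Illustrative results (not part of the original file):
# match_pattern("aab", "c*a*b") -> True   ("c*" matches the empty string, "a*" matches "aa")
# match_pattern("aaa", "aa")    -> False  (no wildcard can absorb the extra "a")
# match_pattern("ab", ".*")     -> True   (".*" matches any string)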
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 217 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
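# Sanity check (illustrative, not part of the original file):
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]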
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
            if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort ( graph , vert , visited ) ->list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components ( reversed_graph , vert , visited ) ->list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components ( graph ) ->list[list[int]]:
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
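# Illustrative results (not part of the original file); the ordering of components and of
# vertices within a component depends on traversal order:
# strongly_connected_components(test_graph_1) -> groups {0, 1, 2}, {3} and {4}
# strongly_connected_components(test_graph_2) -> groups {0, 1, 2} and {3, 4, 5}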
| 173 |
from maths.prime_factors import prime_factors
def liouville_lambda ( number : int ) ->int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
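# Illustrative values (not part of the original file), assuming prime_factors counts
# multiplicity, which makes this the Liouville lambda function (-1)**Omega(n):
# liouville_lambda(1) -> 1, liouville_lambda(2) -> -1, liouville_lambda(4) -> 1,
# liouville_lambda(8) -> -1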
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
A__ : int = logging.getLogger(__name__)
A__ : Optional[int] = '''pytorch_model.bin'''
@dataclasses.dataclass
class __snake_case :
_a = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} ,)
@dataclasses.dataclass
class __snake_case :
_a = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
_a = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''The name of the task to train on.'''} ,)
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class __snake_case :
_a = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
_a = dataclasses.field(
default='''accuracy''' ,metadata={'''help''': '''The evaluation metric used for the task.'''} )
_a = dataclasses.field(
default='''no''' ,metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
} ,)
_a = dataclasses.field(
default=10 ,metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} ,)
_a = dataclasses.field(
default=0.0 ,metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} ,)
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} ,)
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} ,)
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} ,)
_a = dataclasses.field(
default=0.0 ,metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} ,)
_a = dataclasses.field(
default=100 ,metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} ,)
_a = dataclasses.field(
default=UpperCamelCase_ ,metadata={'''help''': '''Random seed for initialization.'''} ,)
def create_pseudo_labeled_data( __UpperCamelCase : str ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ):
lowerCAmelCase_ : Tuple = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 )
if args.do_filter_by_confidence:
lowerCAmelCase_ : Union[str, Any] = dataset.filter(lambda __UpperCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowerCAmelCase_ : Any = int(eval_result * len(__UpperCamelCase ) )
print(__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = dataset.sort('''probability''' ,reverse=__UpperCamelCase )
lowerCAmelCase_ : List[str] = dataset.select(range(__UpperCamelCase ) )
lowerCAmelCase_ : Dict = dataset.remove_columns(['''label''', '''probability'''] )
lowerCAmelCase_ : List[Any] = dataset.rename_column('''prediction''' ,'''label''' )
lowerCAmelCase_ : str = dataset.map(lambda __UpperCamelCase : {"label": idalabel[example["label"]]} )
lowerCAmelCase_ : Dict = dataset.shuffle(seed=args.seed )
lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase ,f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__UpperCamelCase ,index=__UpperCamelCase )
else:
dataset.to_json(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,**__UpperCamelCase : Dict ):
lowerCAmelCase_ : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO ,)
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowerCAmelCase_ : int = STModelArguments(model_name_or_path=__UpperCamelCase )
lowerCAmelCase_ : Any = STDataArguments(train_file=__UpperCamelCase ,infer_file=__UpperCamelCase )
lowerCAmelCase_ : List[Any] = STTrainingArguments(output_dir=__UpperCamelCase )
lowerCAmelCase_ : Dict = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__UpperCamelCase ).items():
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for key, value in kwargs.items():
if hasattr(__UpperCamelCase ,__UpperCamelCase ):
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Sanity checks
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Optional[int] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowerCAmelCase_ : Any = args.train_file
lowerCAmelCase_ : int = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowerCAmelCase_ : Tuple = args.eval_file
for key in data_files:
lowerCAmelCase_ : Any = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowerCAmelCase_ : Any = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
lowerCAmelCase_ : Dict = f"""{args.output_dir}/self-train_iter-{{}}""".format
lowerCAmelCase_ : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir ,exist_ok=__UpperCamelCase )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
accelerator.wait_for_everyone()
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : str = False
# Show the progress bar
lowerCAmelCase_ : Dict = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
lowerCAmelCase_ : Any = data_dir_format(__UpperCamelCase )
assert os.path.exists(__UpperCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowerCAmelCase_ : Optional[Any] = os.path.join(__UpperCamelCase ,'''stage-1''' )
lowerCAmelCase_ : int = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__UpperCamelCase ,__UpperCamelCase ):
arguments_dict.update({key: value} )
lowerCAmelCase_ : int = os.path.join(__UpperCamelCase ,'''best-checkpoint''' ,__UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' ,__UpperCamelCase ,__UpperCamelCase ,)
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' ,__UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' ,__UpperCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase ,'''best-checkpoint''' )
lowerCAmelCase_ : List[Any] = os.path.join(__UpperCamelCase ,'''stage-2''' )
# Update arguments_dict
lowerCAmelCase_ : Optional[Any] = model_path
lowerCAmelCase_ : Dict = data_files['''train''']
lowerCAmelCase_ : Optional[Any] = current_output_dir
lowerCAmelCase_ : int = os.path.join(__UpperCamelCase ,'''best-checkpoint''' ,__UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' ,__UpperCamelCase ,__UpperCamelCase ,)
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' ,__UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' ,__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = iteration
lowerCAmelCase_ : Tuple = data_dir_format(iteration + 1 )
lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(os.path.join(__UpperCamelCase ,'''best-checkpoint''' ) )
lowerCAmelCase_ : int = config.idalabel
lowerCAmelCase_ : str = os.path.join(__UpperCamelCase ,'''eval_results_best-checkpoint.json''' )
lowerCAmelCase_ : str = os.path.join(__UpperCamelCase ,'''test_results_best-checkpoint.json''' )
assert os.path.exists(__UpperCamelCase )
with open(__UpperCamelCase ,'''r''' ) as f:
lowerCAmelCase_ : int = float(json.load(__UpperCamelCase )[args.eval_metric] )
lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase ,'''infer_output_best-checkpoint.csv''' )
assert os.path.exists(__UpperCamelCase )
# Loading the dataset from local csv or json files.
lowerCAmelCase_ : Dict = load_dataset(args.data_file_extension ,data_files={'''data''': data_files['''infer''']} )['''data''']
lowerCAmelCase_ : List[str] = load_dataset('''csv''' ,data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
shutil.copy(__UpperCamelCase ,os.path.join(__UpperCamelCase ,f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__UpperCamelCase ):
shutil.copy(__UpperCamelCase ,os.path.join(__UpperCamelCase ,f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
accelerator.wait_for_everyone()
lowerCAmelCase_ : Any = os.path.join(__UpperCamelCase ,f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowerCAmelCase_ : Optional[int] = eval_result
if best_iteration is None:
lowerCAmelCase_ : Optional[int] = new_iteration
lowerCAmelCase_ : Any = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowerCAmelCase_ : Optional[Any] = new_iteration
lowerCAmelCase_ : Dict = new_eval_result
lowerCAmelCase_ : Any = 0
else:
if new_eval_result == best_eval_result:
lowerCAmelCase_ : Optional[int] = new_iteration
lowerCAmelCase_ : Dict = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowerCAmelCase_ : Dict = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' ,__UpperCamelCase )
logger.info('''Best evaluation result: %s = %f''' ,args.eval_metric ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(__UpperCamelCase ,'''eval_results_best-iteration.json''' ) ,)
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' ,args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' ,args.eval_metric ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(__UpperCamelCase ,'''eval_results_best-iteration.json''' ) ,)
| 103 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
A__ : Union[str, Any] = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
A__ : Optional[int] = '''hopper-medium-v2'''
A__ : int = gym.make(env_name)
A__ : Optional[int] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
A__ : int = env.reset()
A__ : Optional[int] = 0
A__ : Union[str, Any] = 0
A__ : Union[str, Any] = 1000
A__ : Optional[Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
A__ : Union[str, Any] = pipeline(obs, planning_horizon=32)
# execute action in environment
A__ , A__ , A__ , A__ : str = env.step(denorm_actions)
A__ : Dict = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
A__ : List[str] = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 103 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class A__ ( unittest.TestCase ):
"""simple docstring"""
__A : List[str] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__A : Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __lowercase ( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : Any = TextaTextGenerationPipeline(model=lowercase , tokenizer=lowercase)
return generator, ["Something to write", "Something else"]
def __lowercase ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : Dict = generator('Something there')
self.assertEqual(lowercase , [{'generated_text': ANY(lowercase)}])
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
a__ : Optional[int] = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=lowercase)
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
] , )
a__ : Any = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase)
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
[{'generated_text': ANY(lowercase)}, {'generated_text': ANY(lowercase)}],
] , )
with self.assertRaises(lowercase):
generator(4)
@require_torch
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : List[str] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
a__ : Union[str, Any] = generator('Something there' , do_sample=lowercase)
self.assertEqual(lowercase , [{'generated_text': ''}])
a__ : List[str] = 3
a__ : List[Any] = generator(
'Something there' , num_return_sequences=lowercase , num_beams=lowercase , )
a__ : Union[str, Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(lowercase , lowercase)
a__ : int = generator('This is a test' , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase)
self.assertEqual(
lowercase , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
a__ : Tuple = generator.model.config.eos_token_id
a__ : Any = '<pad>'
a__ : Tuple = generator(
['This is a test', 'This is a second test'] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Optional[Any] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
a__ : Optional[int] = generator('Something there' , do_sample=lowercase)
self.assertEqual(lowercase , [{'generated_text': ''}])
| 225 |
def compute_ap ( l ) -> None:  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
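# For the sample graph above, the articulation points printed should be 2, 3 and 5:
# removing any of them disconnects the graph (an illustrative note, not part of the original file).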
| 225 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : List[Any] ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 63 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings ( timesteps: jnp.ndarray , embedding_dim: int , freq_shift: float = 1 , min_timescale: float = 1 , max_timescale: float = 1.0e4 , flip_sin_to_cos: bool = False , scale: float = 1.0 , ) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase)
lowerCAmelCase = nn.silu(__lowerCAmelCase)
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase)
return temb
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : float = 1
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
return get_sinusoidal_embeddings(
__lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
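
# A minimal usage sketch (an illustrative addition, not in the original):
# embed four timesteps into 32-dimensional sinusoidal features.
example_timesteps = jnp.array([0.0, 1.0, 10.0, 100.0])
example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
print(example_emb.shape)  # (4, 32)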
| 272 | 0 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral (1 to 3999) to an integer.
    https://en.wikipedia.org/wiki/Roman_numerals

    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(roman_to_int(key) == value for key, value in tests.items())
    True
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one encodes subtraction (e.g. IV = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer (1 to 3999) to a Roman numeral.
    https://en.wikipedia.org/wiki/Roman_numerals

    >>> tests = {3: "III", 154: "CLIV", 1009: "MIX", 2500: "MMD", 3999: "MMMCMXCIX"}
    >>> all(int_to_roman(key) == value for key, value in tests.items())
    True
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
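
    # Round-trip sanity check (an illustrative addition, not in the original):
    # int -> Roman -> int should be the identity over the supported range.
    assert all(roman_to_int(int_to_roman(i)) == i for i in range(1, 4000))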
| 357 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration of the AltCLIP text encoder."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration of the AltCLIP vision encoder."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding both text and vision sub-configurations."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different "
                            f'values. The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
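
# A minimal usage sketch (an illustrative addition, not part of the original
# file): combine explicit text and vision sub-configs and round-trip to a dict.
text = AltCLIPTextConfig(num_hidden_layers=2)
vision = AltCLIPVisionConfig(num_hidden_layers=2)
combined = AltCLIPConfig.from_text_vision_configs(text, vision)
assert combined.to_dict()["model_type"] == "altclip"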
| 66 | 0 |